Upgrade dependency version: github.com/open-policy-agent/opa (#5315)

Upgrade dependency version: github.com/open-policy-agent/opa v0.18.0 -> v0.45.0

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
Author: hongzhouzi
Date: 2022-10-31 10:58:55 +08:00
Committed by: GitHub
Parent: 668fca1773
Commit: ef03b1e3df
363 changed files with 277341 additions and 13544 deletions


vendor/github.com/open-policy-agent/opa/bundle/file.go

@@ -2,27 +2,71 @@ package bundle
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/open-policy-agent/opa/loader/filter"
"github.com/open-policy-agent/opa/storage"
)
// Descriptor contains information about a file and
// can be used to read the file contents.
type Descriptor struct {
url string
path string
reader io.Reader
closer io.Closer
closeOnce *sync.Once
}
func newDescriptor(path string, reader io.Reader) *Descriptor {
// lazyFile defers reading the file until the first call of Read
type lazyFile struct {
path string
file *os.File
}
// newLazyFile creates a new instance of lazyFile
func newLazyFile(path string) *lazyFile {
return &lazyFile{path: path}
}
// Read implements io.Reader. It opens the underlying file on the first
// call if it has not been opened yet, then reads using the file's Read
// method
func (f *lazyFile) Read(b []byte) (int, error) {
var err error
if f.file == nil {
if f.file, err = os.Open(f.path); err != nil {
return 0, fmt.Errorf("failed to open file %s: %w", f.path, err)
}
}
return f.file.Read(b)
}
// Close closes the underlying file, if it was ever opened, using the
// file's Close method
func (f *lazyFile) Close() error {
if f.file != nil {
return f.file.Close()
}
return nil
}
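
For orientation, a minimal in-package sketch (hypothetical, not part of this commit) of the lazyFile contract: no file handle is held until the first Read, and Close is a no-op if Read was never called.

// Hypothetical in-package usage sketch of lazyFile (illustration only).
func exampleLazyFile() error {
	lf := newLazyFile("/bundles/example/data.json") // nothing opened yet
	buf := make([]byte, 512)
	if _, err := lf.Read(buf); err != nil && err != io.EOF { // os.Open happens here
		return err
	}
	return lf.Close() // closes the handle that Read opened
}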
func newDescriptor(url, path string, reader io.Reader) *Descriptor {
return &Descriptor{
url: url,
path: path,
reader: reader,
}
@@ -39,6 +83,11 @@ func (d *Descriptor) Path() string {
return d.path
}
// URL returns the url of the file.
func (d *Descriptor) URL() string {
return d.url
}
// Read will read all the contents from the file the Descriptor refers to
// into the dest writer, up to n bytes. Will return an io.EOF error
// if EOF is encountered before n bytes are read.
@@ -65,23 +114,45 @@ type DirectoryLoader interface {
// NextFile must return io.EOF if there is no next value. The returned
// descriptor should *always* be closed when no longer needed.
NextFile() (*Descriptor, error)
WithFilter(filter filter.LoaderFilter) DirectoryLoader
}
type dirLoader struct {
root string
files []string
idx int
root string
files []string
idx int
filter filter.LoaderFilter
}
// NewDirectoryLoader returns a basic DirectoryLoader implementation
// that will load files from a given root directory path.
func NewDirectoryLoader(root string) DirectoryLoader {
if len(root) > 1 {
// Normalize relative directories, ex "./src/bundle" -> "src/bundle"
// We don't need an absolute path, but this makes the joined/trimmed
// paths more uniform.
if root[0] == '.' && root[1] == filepath.Separator {
if len(root) == 2 {
root = root[:1] // "./" -> "."
} else {
root = root[2:] // remove leading "./"
}
}
}
d := dirLoader{
root: root,
}
return &d
}
// WithFilter specifies the filter object to use to filter files while loading bundles
func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
d.filter = filter
return d
}
// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (d *dirLoader) NextFile() (*Descriptor, error) {
@@ -90,12 +161,19 @@ func (d *dirLoader) NextFile() (*Descriptor, error) {
d.files = []string{}
err := filepath.Walk(d.root, func(path string, info os.FileInfo, err error) error {
if info != nil && info.Mode().IsRegular() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
return nil
}
d.files = append(d.files, filepath.ToSlash(path))
} else if info != nil && info.Mode().IsDir() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
return filepath.SkipDir
}
}
return nil
})
if err != nil {
return nil, errors.Wrap(err, "failed to list files")
return nil, fmt.Errorf("failed to list files: %w", err)
}
}
@@ -107,28 +185,40 @@ func (d *dirLoader) NextFile() (*Descriptor, error) {
fileName := d.files[d.idx]
d.idx++
fh, err := os.Open(fileName)
if err != nil {
return nil, errors.Wrapf(err, "failed to open file %s", fileName)
}
fh := newLazyFile(fileName)
// Trim off the root directory and return path as if chrooted
cleanedPath := strings.TrimPrefix(fileName, d.root)
if d.root == "." && filepath.Base(fileName) == ManifestExt {
cleanedPath = fileName
}
if !strings.HasPrefix(cleanedPath, "/") {
cleanedPath = "/" + cleanedPath
}
f := newDescriptor(cleanedPath, fh).withCloser(fh)
f := newDescriptor(path.Join(d.root, cleanedPath), cleanedPath, fh).withCloser(fh)
return f, nil
}
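
Taken together, a caller drives the directory loader like this (a usage sketch; ./bundle is a hypothetical directory and error handling is abbreviated):

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	loader := bundle.NewDirectoryLoader("./bundle") // normalized internally to "bundle"
	for {
		d, err := loader.NextFile()
		if err == io.EOF {
			break // no more files
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(d.Path(), d.URL()) // chrooted path plus root-joined URL
		d.Close()                      // descriptors should always be closed
	}
}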
type tarballLoader struct {
r io.Reader
tr *tar.Reader
baseURL string
r io.Reader
tr *tar.Reader
files []file
idx int
filter filter.LoaderFilter
skipDir map[string]struct{}
}
// NewTarballLoader returns a new DirectoryLoader that reads
// files out of a gzipped tar archive.
type file struct {
name string
reader io.Reader
path storage.Path
raw []byte
}
// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
func NewTarballLoader(r io.Reader) DirectoryLoader {
l := tarballLoader{
r: r,
@@ -136,31 +226,188 @@ func NewTarballLoader(r io.Reader) DirectoryLoader {
return &l
}
// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads
// files out of a gzipped tar archive. The file URLs will be prefixed
// with the baseURL.
func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
l := tarballLoader{
baseURL: strings.TrimSuffix(baseURL, "/"),
r: r,
}
return &l
}
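
A sketch of the new constructor in use (bundle.tar.gz and the base URL are hypothetical):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	f, err := os.Open("bundle.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	loader := bundle.NewTarballLoaderWithBaseURL(f, "https://example.com/bundles/")
	d, err := loader.NextFile() // the trailing "/" on the base URL is trimmed
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.URL()) // e.g. https://example.com/bundles/example/data.json
}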
// WithFilter specifies the filter object to use to filter files while loading bundles
func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
t.filter = filter
return t
}
// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (t *tarballLoader) NextFile() (*Descriptor, error) {
if t.tr == nil {
gr, err := gzip.NewReader(t.r)
if err != nil {
return nil, errors.Wrap(err, "archive read failed")
return nil, fmt.Errorf("archive read failed: %w", err)
}
t.tr = tar.NewReader(gr)
}
for {
header, err := t.tr.Next()
// Eventually we will get an io.EOF error when finished
// iterating through the archive
if err != nil {
return nil, err
if t.files == nil {
t.files = []file{}
if t.skipDir == nil {
t.skipDir = map[string]struct{}{}
}
// Keep iterating on the archive until we find a normal file
if header.Typeflag == tar.TypeReg {
// no need to close this descriptor after reading
f := newDescriptor(header.Name, t.tr)
return f, nil
for {
header, err := t.tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
// Keep iterating on the archive until we find a normal file
if header.Typeflag == tar.TypeReg {
if t.filter != nil {
if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) {
continue
}
basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/")
// check if the directory is to be skipped
if _, ok := t.skipDir[basePath]; ok {
continue
}
match := false
for p := range t.skipDir {
if strings.HasPrefix(basePath, p) {
match = true
break
}
}
if match {
continue
}
}
f := file{name: header.Name}
var buf bytes.Buffer
if _, err := io.Copy(&buf, t.tr); err != nil {
return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err)
}
f.reader = &buf
t.files = append(t.files, f)
} else if header.Typeflag == tar.TypeDir {
cleanedPath := filepath.ToSlash(header.Name)
if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) {
t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{}
}
}
}
}
// If done reading files then just return io.EOF
// errors for each NextFile() call
if t.idx >= len(t.files) {
return nil, io.EOF
}
f := t.files[t.idx]
t.idx++
return newDescriptor(path.Join(t.baseURL, f.name), f.name, f.reader), nil
}
// Next implements the storage.Iterator interface.
// It iterates to the next policy or data file in the directory tree
// and returns a storage.Update for the file.
func (it *iterator) Next() (*storage.Update, error) {
if it.files == nil {
it.files = []file{}
for _, item := range it.raw {
f := file{name: item.Path}
fpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(f.name)), "/.")
if strings.HasSuffix(f.name, RegoExt) {
fpath = strings.Trim(f.name, "/")
}
p, ok := storage.ParsePathEscaped("/" + fpath)
if !ok {
return nil, fmt.Errorf("storage path invalid: %v", f.name)
}
f.path = p
f.raw = item.Value
it.files = append(it.files, f)
}
sortFilePathAscend(it.files)
}
// If done reading files then just return io.EOF
// errors for each Next() call
if it.idx >= len(it.files) {
return nil, io.EOF
}
f := it.files[it.idx]
it.idx++
isPolicy := false
if strings.HasSuffix(f.name, RegoExt) {
isPolicy = true
}
return &storage.Update{
Path: f.path,
Value: f.raw,
IsPolicy: isPolicy,
}, nil
}
type iterator struct {
raw []Raw
files []file
idx int
}
func NewIterator(raw []Raw) storage.Iterator {
it := iterator{
raw: raw,
}
return &it
}
func sortFilePathAscend(files []file) {
sort.Slice(files, func(i, j int) bool {
return len(files[i].path) < len(files[j].path)
})
}
func getdepth(path string, isDir bool) int {
if isDir {
cleanedPath := strings.Trim(filepath.ToSlash(path), "/")
return len(strings.Split(cleanedPath, "/"))
}
basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/")
return len(strings.Split(basePath, "/"))
}
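
getdepth reduces a path to its number of slash-separated segments, which is the depth value that loader filters receive. A hypothetical in-package sketch of the resulting values:

// Hypothetical in-package sketch of getdepth semantics (illustration only).
func exampleGetdepth() {
	_ = getdepth("policies/http/authz.rego", false) // 2: depth of parent dir "policies/http"
	_ = getdepth("policies/http", true)             // 2: depth of the directory itself
	_ = getdepth("data.json", false)                // 1: parent dir is "."
}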

vendor/github.com/open-policy-agent/opa/bundle/filefs.go generated vendored Normal file

@@ -0,0 +1,100 @@
//go:build go1.16
// +build go1.16
package bundle
import (
"fmt"
"io"
"io/fs"
"path/filepath"
"sync"
"github.com/open-policy-agent/opa/loader/filter"
)
const (
defaultFSLoaderRoot = "."
)
type dirLoaderFS struct {
sync.Mutex
filesystem fs.FS
files []string
idx int
filter filter.LoaderFilter
}
// NewFSLoader returns a basic DirectoryLoader implementation
// that will load files from an fs.FS interface
func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) {
d := dirLoaderFS{
filesystem: filesystem,
}
return &d, nil
}
func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error {
if err != nil {
return err
}
if dirEntry != nil {
info, err := dirEntry.Info()
if err != nil {
return err
}
if dirEntry.Type().IsRegular() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
return nil
}
d.files = append(d.files, path)
} else if dirEntry.Type().IsDir() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
return fs.SkipDir
}
}
}
return nil
}
// WithFilter specifies the filter object to use to filter files while loading bundles
func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
d.filter = filter
return d
}
// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
d.Lock()
defer d.Unlock()
if d.files == nil {
err := fs.WalkDir(d.filesystem, defaultFSLoaderRoot, d.walkDir)
if err != nil {
return nil, fmt.Errorf("failed to list files: %w", err)
}
}
// If done reading files then just return io.EOF
// errors for each NextFile() call
if d.idx >= len(d.files) {
return nil, io.EOF
}
fileName := d.files[d.idx]
d.idx++
fh, err := d.filesystem.Open(fileName)
if err != nil {
return nil, fmt.Errorf("failed to open file %s: %w", fileName, err)
}
fileNameWithSlash := fmt.Sprintf("/%s", fileName)
f := newDescriptor(fileNameWithSlash, fileNameWithSlash, fh).withCloser(fh)
return f, nil
}
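
Because the loader only needs an fs.FS, bundles can now be loaded straight from an embedded filesystem. A sketch, assuming a policies directory is embedded:

package main

import (
	"embed"
	"io"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

//go:embed policies
var policiesFS embed.FS // hypothetical embedded bundle contents

func main() {
	loader, err := bundle.NewFSLoader(policiesFS)
	if err != nil {
		log.Fatal(err)
	}
	for {
		d, err := loader.NextFile()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(d.Path())
		d.Close()
	}
}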

vendor/github.com/open-policy-agent/opa/bundle/hash.go generated vendored Normal file (141 lines)

@@ -0,0 +1,141 @@
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package bundle
import (
"bytes"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/json"
"fmt"
"hash"
"io"
"sort"
"strings"
)
// HashingAlgorithm represents a subset of hashing algorithms implemented in Go
type HashingAlgorithm string
// Supported values for HashingAlgorithm
const (
MD5 HashingAlgorithm = "MD5"
SHA1 HashingAlgorithm = "SHA-1"
SHA224 HashingAlgorithm = "SHA-224"
SHA256 HashingAlgorithm = "SHA-256"
SHA384 HashingAlgorithm = "SHA-384"
SHA512 HashingAlgorithm = "SHA-512"
SHA512224 HashingAlgorithm = "SHA-512-224"
SHA512256 HashingAlgorithm = "SHA-512-256"
)
// String returns the string representation of a HashingAlgorithm
func (alg HashingAlgorithm) String() string {
return string(alg)
}
// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy
type SignatureHasher interface {
HashFile(v interface{}) ([]byte, error)
}
type hasher struct {
h func() hash.Hash // hash function factory
}
// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm
func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) {
h := &hasher{}
switch alg {
case MD5:
h.h = md5.New
case SHA1:
h.h = sha1.New
case SHA224:
h.h = sha256.New224
case SHA256:
h.h = sha256.New
case SHA384:
h.h = sha512.New384
case SHA512:
h.h = sha512.New
case SHA512224:
h.h = sha512.New512_224
case SHA512256:
h.h = sha512.New512_256
default:
return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg)
}
return h, nil
}
// HashFile hashes the file content, JSON or binary, both in golang native format.
func (h *hasher) HashFile(v interface{}) ([]byte, error) {
hf := h.h()
walk(v, hf)
return hf.Sum(nil), nil
}
// walk hashes the file content, JSON or binary, both in golang native format.
//
// Computation for unstructured documents is a hash of the document.
//
// Computation for the types of structured JSON document is as follows:
//
// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }.
//
// array: Hash [, then digest of the value, then comma (between items) and finally ].
func walk(v interface{}, h io.Writer) {
switch x := v.(type) {
case map[string]interface{}:
_, _ = h.Write([]byte("{"))
var keys []string
for k := range x {
keys = append(keys, k)
}
sort.Strings(keys)
for i, key := range keys {
if i > 0 {
_, _ = h.Write([]byte(","))
}
_, _ = h.Write(encodePrimitive(key))
_, _ = h.Write([]byte(":"))
walk(x[key], h)
}
_, _ = h.Write([]byte("}"))
case []interface{}:
_, _ = h.Write([]byte("["))
for i, e := range x {
if i > 0 {
_, _ = h.Write([]byte(","))
}
walk(e, h)
}
_, _ = h.Write([]byte("]"))
case []byte:
_, _ = h.Write(x)
default:
_, _ = h.Write(encodePrimitive(x))
}
}
func encodePrimitive(v interface{}) []byte {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
_ = encoder.Encode(v)
return []byte(strings.Trim(buf.String(), "\n"))
}
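
The practical effect of the canonical form walk produces: two JSON documents differing only in key order or whitespace yield the same digest. A sketch:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	h, err := bundle.NewSignatureHasher(bundle.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	var a, b interface{}
	_ = json.Unmarshal([]byte(`{"b":1,"a":{"y":2,"x":3}}`), &a)
	_ = json.Unmarshal([]byte(` { "a": {"x":3, "y":2}, "b": 1 } `), &b)
	ha, _ := h.HashFile(a) // both walk to {"a":{"x":3,"y":2},"b":1}
	hb, _ := h.HashFile(b)
	fmt.Printf("%x\n%x\n", ha, hb) // identical digests
}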

vendor/github.com/open-policy-agent/opa/bundle/keys.go generated vendored Normal file (145 lines)

@@ -0,0 +1,145 @@
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
// Package bundle provides helpers that assist in creating the verification and signing key configuration
package bundle
import (
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
"github.com/open-policy-agent/opa/internal/jwx/jws/sign"
"github.com/open-policy-agent/opa/keys"
"github.com/open-policy-agent/opa/util"
)
const (
defaultTokenSigningAlg = "RS256"
)
// KeyConfig holds the keys used to sign or verify bundles and tokens
// Moved to own package, alias kept for backwards compatibility
type KeyConfig = keys.Config
// VerificationConfig represents the key configuration used to verify a signed bundle
type VerificationConfig struct {
PublicKeys map[string]*KeyConfig
KeyID string `json:"keyid"`
Scope string `json:"scope"`
Exclude []string `json:"exclude_files"`
}
// NewVerificationConfig returns a new VerificationConfig
func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
return &VerificationConfig{
PublicKeys: keys,
KeyID: id,
Scope: scope,
Exclude: exclude,
}
}
// ValidateAndInjectDefaults validates the config and inserts default values
func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
vc.PublicKeys = keys
if vc.KeyID != "" {
found := false
for key := range keys {
if key == vc.KeyID {
found = true
break
}
}
if !found {
return fmt.Errorf("key id %s not found", vc.KeyID)
}
}
return nil
}
// GetPublicKey returns the public key corresponding to the given key id
func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
var kc *KeyConfig
var ok bool
if kc, ok = vc.PublicKeys[id]; !ok {
return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
}
return kc, nil
}
// SigningConfig represents the key configuration used to generate a signed bundle
type SigningConfig struct {
Plugin string
Key string
Algorithm string
ClaimsPath string
}
// NewSigningConfig returns a new SigningConfig
func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
if alg == "" {
alg = defaultTokenSigningAlg
}
return &SigningConfig{
Plugin: defaultSignerID,
Key: key,
Algorithm: alg,
ClaimsPath: claimsPath,
}
}
// WithPlugin sets the signing plugin in the signing config
func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig {
if plugin != "" {
s.Plugin = plugin
}
return s
}
// GetPrivateKey returns the private key or secret from the signing config
func (s *SigningConfig) GetPrivateKey() (interface{}, error) {
block, _ := pem.Decode([]byte(s.Key))
if block != nil {
return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm))
}
var priv string
if _, err := os.Stat(s.Key); err == nil {
bs, err := ioutil.ReadFile(s.Key)
if err != nil {
return nil, err
}
priv = string(bs)
} else if os.IsNotExist(err) {
priv = s.Key
} else {
return nil, err
}
return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm))
}
// GetClaims returns the claims by reading the file specified in the signing config
func (s *SigningConfig) GetClaims() (map[string]interface{}, error) {
var claims map[string]interface{}
bs, err := ioutil.ReadFile(s.ClaimsPath)
if err != nil {
return claims, err
}
if err := util.UnmarshalJSON(bs, &claims); err != nil {
return claims, err
}
return claims, nil
}
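
A sketch tying the two configs together (the file paths, key id, and algorithm are hypothetical):

package main

import (
	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	// Signing side: an empty algorithm falls back to the RS256 default.
	sc := bundle.NewSigningConfig("/keys/private.pem", "", "/keys/claims.json")

	// Verification side: one named public key, selected by its id.
	pubKeys := map[string]*bundle.KeyConfig{
		"default-key": {Key: "/keys/public.pem", Algorithm: "RS256"},
	}
	vc := bundle.NewVerificationConfig(pubKeys, "default-key", "", nil)
	_, _ = sc, vc
}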

vendor/github.com/open-policy-agent/opa/bundle/sign.go generated vendored Normal file (135 lines)

@@ -0,0 +1,135 @@
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
// Package bundle provides helpers that assist in creating a signed bundle
package bundle
import (
"crypto/rand"
"encoding/json"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
"github.com/open-policy-agent/opa/internal/jwx/jws"
)
const defaultSignerID = "_default"
var signers map[string]Signer
// Signer is the interface expected for implementations that generate bundle signatures.
type Signer interface {
GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error)
}
// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified
// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates
// a signed token given the list of files to be included in the payload and the bundle
// signing config. The keyID, if non-empty, represents the value for the "keyid" claim in the token.
func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
var plugin string
// for backwards compatibility, check if there is no plugin specified, and use default
if sc.Plugin == "" {
plugin = defaultSignerID
} else {
plugin = sc.Plugin
}
signer, err := GetSigner(plugin)
if err != nil {
return "", err
}
return signer.GenerateSignedToken(files, sc, keyID)
}
// DefaultSigner is the default bundle signing implementation. It signs bundles by generating
// a JWT and signing it using a locally-accessible private key.
type DefaultSigner struct{}
// GenerateSignedToken generates a signed token given the list of files to be
// included in the payload and the bundle signing config. The keyID if non-empty,
// represents the value for the "keyid" claim in the token
func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
payload, err := generatePayload(files, sc, keyID)
if err != nil {
return "", err
}
privateKey, err := sc.GetPrivateKey()
if err != nil {
return "", err
}
var headers jws.StandardHeaders
if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil {
return "", err
}
if keyID != "" {
if err := headers.Set(jws.KeyIDKey, keyID); err != nil {
return "", err
}
}
hdr, err := json.Marshal(headers)
if err != nil {
return "", err
}
token, err := jws.SignLiteral(payload,
jwa.SignatureAlgorithm(sc.Algorithm),
privateKey,
hdr,
rand.Reader)
if err != nil {
return "", err
}
return string(token), nil
}
func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) {
payload := make(map[string]interface{})
payload["files"] = files
if sc.ClaimsPath != "" {
claims, err := sc.GetClaims()
if err != nil {
return nil, err
}
for claim, value := range claims {
payload[claim] = value
}
} else {
if keyID != "" {
// keyid claim is deprecated but include it for backwards compatibility.
payload["keyid"] = keyID
}
}
return json.Marshal(payload)
}
// GetSigner returns the Signer registered under the given id
func GetSigner(id string) (Signer, error) {
signer, ok := signers[id]
if !ok {
return nil, fmt.Errorf("no signer exists under id %s", id)
}
return signer, nil
}
// RegisterSigner registers a Signer under the given id
func RegisterSigner(id string, s Signer) error {
if id == defaultSignerID {
return fmt.Errorf("signer id %s is reserved, use a different id", id)
}
signers[id] = s
return nil
}
func init() {
signers = map[string]Signer{
defaultSignerID: &DefaultSigner{},
}
}
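
End to end, producing a signature reduces to the call below (a sketch; the FileInfo digest is a hypothetical placeholder that would normally come from hashing the bundle's files):

package main

import (
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	files := []bundle.FileInfo{
		{Name: "/example/data.json", Hash: "1b2c...", Algorithm: "SHA-256"}, // placeholder digest
	}
	sc := bundle.NewSigningConfig("/keys/private.pem", "RS256", "")
	token, err := bundle.GenerateSignedToken(files, sc, "default-key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token) // compact JWS whose payload embeds the files list
}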

vendor/github.com/open-policy-agent/opa/bundle/store.go

@@ -6,41 +6,61 @@ package bundle
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/internal/json/patch"
"github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/util"
)
var bundlesBasePath = storage.MustParsePath("/system/bundles")
// BundlesBasePath is the storage path used for storing bundle metadata
var BundlesBasePath = storage.MustParsePath("/system/bundles")
// Note: As needed these helpers could be memoized.
// ManifestStoragePath is the storage path used for the given named bundle manifest.
func ManifestStoragePath(name string) storage.Path {
return append(bundlesBasePath, name, "manifest")
return append(BundlesBasePath, name, "manifest")
}
// EtagStoragePath is the storage path used for the given named bundle etag.
func EtagStoragePath(name string) storage.Path {
return append(BundlesBasePath, name, "etag")
}
func namedBundlePath(name string) storage.Path {
return append(bundlesBasePath, name)
return append(BundlesBasePath, name)
}
func rootsPath(name string) storage.Path {
return append(bundlesBasePath, name, "manifest", "roots")
return append(BundlesBasePath, name, "manifest", "roots")
}
func revisionPath(name string) storage.Path {
return append(bundlesBasePath, name, "manifest", "revision")
return append(BundlesBasePath, name, "manifest", "revision")
}
func wasmModulePath(name string) storage.Path {
return append(BundlesBasePath, name, "wasm")
}
func wasmEntrypointsPath(name string) storage.Path {
return append(BundlesBasePath, name, "manifest", "wasm")
}
func metadataPath(name string) storage.Path {
return append(BundlesBasePath, name, "manifest", "metadata")
}
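
For a bundle named "authz", the helpers above lay metadata out under /system/bundles like so (illustrative):

// ManifestStoragePath("authz")  -> /system/bundles/authz/manifest
// EtagStoragePath("authz")      -> /system/bundles/authz/etag
// wasmModulePath("authz")       -> /system/bundles/authz/wasm
// wasmEntrypointsPath("authz")  -> /system/bundles/authz/manifest/wasm
// metadataPath("authz")         -> /system/bundles/authz/manifest/metadata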
// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored.
func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) {
value, err := store.Read(ctx, txn, bundlesBasePath)
value, err := store.Read(ctx, txn, BundlesBasePath)
if err != nil {
return nil, err
}
@@ -65,8 +85,12 @@ func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.
return write(ctx, store, txn, ManifestStoragePath(name), manifest)
}
func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, manifest Manifest) error {
var value interface{} = manifest
// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated.
func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error {
return write(ctx, store, txn, EtagStoragePath(name), etag)
}
func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error {
if err := util.RoundTrip(&value); err != nil {
return err
}
@@ -88,12 +112,94 @@ func write(ctx context.Context, store storage.Store, txn storage.Transaction, pa
func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
path := namedBundlePath(name)
err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
if err != nil && !storage.IsNotFound(err) {
return err
return suppressNotFound(err)
}
// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called
// when the bundle is deactivated.
func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
path := EtagStoragePath(name)
err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
return suppressNotFound(err)
}
func suppressNotFound(err error) error {
if err == nil || storage.IsNotFound(err) {
return nil
}
return err
}
func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error {
basePath := wasmModulePath(name)
for _, wm := range b.WasmModules {
path := append(basePath, wm.Path)
err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw))
if err != nil {
return err
}
}
return nil
}
func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
path := wasmModulePath(name)
err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
return suppressNotFound(err)
}
// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
path := wasmEntrypointsPath(name)
value, err := store.Read(ctx, txn, path)
if err != nil {
return nil, err
}
bs, err := json.Marshal(value)
if err != nil {
return nil, fmt.Errorf("corrupt wasm manifest data")
}
var wasmMetadata []WasmResolver
err = util.UnmarshalJSON(bs, &wasmMetadata)
if err != nil {
return nil, fmt.Errorf("corrupt wasm manifest data")
}
return wasmMetadata, nil
}
// ReadWasmModulesFromStore will read Wasm modules from the store.
func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
path := wasmModulePath(name)
value, err := store.Read(ctx, txn, path)
if err != nil {
return nil, err
}
encodedModules, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("corrupt wasm modules")
}
rawModules := map[string][]byte{}
for path, enc := range encodedModules {
encStr, ok := enc.(string)
if !ok {
return nil, fmt.Errorf("corrupt wasm modules")
}
bs, err := base64.StdEncoding.DecodeString(encStr)
if err != nil {
return nil, err
}
rawModules[path] = bs
}
return rawModules, nil
}
// ReadBundleRootsFromStore returns the roots in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
@@ -141,11 +247,54 @@ func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage
return str, nil
}
// ReadBundleMetadataFromStore returns the metadata in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) {
return readMetadataFromStore(ctx, store, txn, metadataPath(name))
}
func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) {
value, err := store.Read(ctx, txn, path)
if err != nil {
return nil, suppressNotFound(err)
}
data, ok := value.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("corrupt manifest metadata")
}
return data, nil
}
// ReadBundleEtagFromStore returns the etag for the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
return readEtagFromStore(ctx, store, txn, EtagStoragePath(name))
}
func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
value, err := store.Read(ctx, txn, path)
if err != nil {
return "", err
}
str, ok := value.(string)
if !ok {
return "", fmt.Errorf("corrupt bundle etag")
}
return str, nil
}
// ActivateOpts defines options for the Activate API call.
type ActivateOpts struct {
Ctx context.Context
Store storage.Store
Txn storage.Transaction
TxnCtx *storage.Context
Compiler *ast.Compiler
Metrics metrics.Metrics
Bundles map[string]*Bundle // Optional
@@ -173,13 +322,13 @@ type DeactivateOpts struct {
func Deactivate(opts *DeactivateOpts) error {
erase := map[string]struct{}{}
for name := range opts.BundleNames {
if roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name); err == nil {
for _, root := range roots {
erase[root] = struct{}{}
}
} else if !storage.IsNotFound(err) {
roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
if suppressNotFound(err) != nil {
return err
}
for _, root := range roots {
erase[root] = struct{}{}
}
}
_, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.BundleNames, erase)
return err
@@ -190,21 +339,28 @@ func activateBundles(opts *ActivateOpts) error {
// Build collections of bundle names, modules, and roots to erase
erase := map[string]struct{}{}
names := map[string]struct{}{}
deltaBundles := map[string]*Bundle{}
snapshotBundles := map[string]*Bundle{}
for name, b := range opts.Bundles {
names[name] = struct{}{}
if b.Type() == DeltaBundleType {
deltaBundles[name] = b
} else {
snapshotBundles[name] = b
names[name] = struct{}{}
if roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name); err == nil {
roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
if suppressNotFound(err) != nil {
return err
}
for _, root := range roots {
erase[root] = struct{}{}
}
} else if !storage.IsNotFound(err) {
return err
}
// Erase data at new roots to prepare for writing the new data
for _, root := range *b.Manifest.Roots {
erase[root] = struct{}{}
// Erase data at new roots to prepare for writing the new data
for _, root := range *b.Manifest.Roots {
erase[root] = struct{}{}
}
}
}
@@ -215,23 +371,77 @@ func activateBundles(opts *ActivateOpts) error {
return err
}
if len(deltaBundles) != 0 {
err := activateDeltaBundles(opts, deltaBundles)
if err != nil {
return err
}
}
// Erase data and policies at new + old roots, and remove the old
// manifests before activating a new bundles.
// manifests before activating a new snapshot bundle.
remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, names, erase)
if err != nil {
return err
}
for _, b := range opts.Bundles {
// Write data from each new bundle into the store. Only write under the
// roots contained in their manifest. This should be done *before* the
// policies so that path conflict checks can occur.
if err := writeData(opts.Ctx, opts.Store, opts.Txn, *b.Manifest.Roots, b.Data); err != nil {
return err
// Validate data in bundle does not contain paths outside the bundle's roots.
for _, b := range snapshotBundles {
if b.lazyLoadingMode {
for _, item := range b.Raw {
path := filepath.ToSlash(item.Path)
if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile {
var val map[string]json.RawMessage
err = util.Unmarshal(item.Value, &val)
if err == nil {
err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
if err != nil {
return err
}
} else {
// Build an object for the value
p := getNormalizedPath(path)
if len(p) == 0 {
return fmt.Errorf("root value must be object")
}
// verify valid YAML or JSON value
var x interface{}
err := util.Unmarshal(item.Value, &x)
if err != nil {
return err
}
value := item.Value
dir := map[string]json.RawMessage{}
for i := len(p) - 1; i > 0; i-- {
dir[p[i]] = value
bs, err := json.Marshal(dir)
if err != nil {
return err
}
value = bs
dir = map[string]json.RawMessage{}
}
dir[p[0]] = value
err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
if err != nil {
return err
}
}
}
}
}
}
// Write and compile the modules all at once to avoid having to re-do work.
// Compile the modules all at once to avoid having to re-do work.
remainingAndExtra := make(map[string]*ast.Module)
for name, mod := range remaining {
remainingAndExtra[name] = mod
@@ -240,22 +450,132 @@ func activateBundles(opts *ActivateOpts) error {
remainingAndExtra[name] = mod
}
err = writeModules(opts.Ctx, opts.Store, opts.Txn, opts.Compiler, opts.Metrics, opts.Bundles, remainingAndExtra, opts.legacy)
err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy)
if err != nil {
return err
}
for name, b := range opts.Bundles {
// Always write manifests to the named location. If the plugin is in the older style config
// then also write to the old legacy unnamed location.
if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, b.Manifest); err != nil {
if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy); err != nil {
return err
}
if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
return err
}
for name, b := range snapshotBundles {
if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
return err
}
if opts.legacy {
if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, b.Manifest); err != nil {
return err
if err := writeEtagToStore(opts, name, b.Etag); err != nil {
return err
}
if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil {
return err
}
}
return nil
}
func doDFS(obj map[string]json.RawMessage, path string, roots []string) error {
if len(roots) == 1 && roots[0] == "" {
return nil
}
for key := range obj {
newPath := filepath.Join(strings.Trim(path, "/"), key)
// Note: filepath.Join can return paths with '\' separators, always use
// filepath.ToSlash to keep them normalized.
newPath = strings.TrimLeft(filepath.ToSlash(newPath), "/.")
contains := false
prefix := false
if RootPathsContain(roots, newPath) {
contains = true
} else {
for i := range roots {
if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) {
prefix = true
break
}
}
}
if !contains && !prefix {
return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
}
if contains {
continue
}
var next map[string]json.RawMessage
err := util.Unmarshal(obj[key], &next)
if err != nil {
return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
}
if err := doDFS(next, newPath, roots); err != nil {
return err
}
}
return nil
}
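
Informally, doDFS accepts a data key when it lies under a manifest root, or when it is still on the way down to one; anything else is rejected. Illustrative outcomes (hypothetical inputs):

// roots ["authz"]:          {"authz": {...}}            -> allowed: "authz" is inside a root
// roots ["authz/policies"]: {"authz": {"policies": {}}} -> allowed: "authz" is a prefix of a root
// roots ["authz"]:          {"other": {...}}            -> error: roots do not permit '/other'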
func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error {
// Check that the manifest roots and wasm resolvers in the delta bundle
// match with those currently in the store
for name, b := range bundles {
value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name))
if err != nil {
if storage.IsNotFound(err) {
continue
}
return err
}
bs, err := json.Marshal(value)
if err != nil {
return fmt.Errorf("corrupt manifest data: %w", err)
}
var manifest Manifest
err = util.UnmarshalJSON(bs, &manifest)
if err != nil {
return fmt.Errorf("corrupt manifest data: %w", err)
}
if !b.Manifest.equalWasmResolversAndRoots(manifest) {
return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name)
}
}
for _, b := range bundles {
err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data)
if err != nil {
return err
}
}
if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
return err
}
for name, b := range bundles {
if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
return err
}
if err := writeEtagToStore(opts, name, b.Etag); err != nil {
return err
}
}
return nil
@@ -275,11 +595,19 @@ func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transact
}
for name := range names {
if err := EraseManifestFromStore(ctx, store, txn, name); err != nil && !storage.IsNotFound(err) {
if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
return nil, err
}
if err := LegacyEraseManifestFromStore(ctx, store, txn); err != nil && !storage.IsNotFound(err) {
if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil {
return nil, err
}
if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
return nil, err
}
if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
return nil, err
}
}
@@ -293,11 +621,10 @@ func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction
if !ok {
return fmt.Errorf("manifest root path invalid: %v", root)
}
if len(path) > 0 {
if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); err != nil {
if !storage.IsNotFound(err) {
return err
}
if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil {
return err
}
}
}
@@ -328,7 +655,7 @@ func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transac
}
deleted := false
for root := range roots {
if strings.HasPrefix(path, root) {
if RootPathsContain([]string{root}, path) {
if err := store.DeletePolicy(ctx, txn, id); err != nil {
return nil, err
}
@@ -344,6 +671,70 @@ func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transac
return remaining, nil
}
func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error {
// Always write manifests to the named location. If the plugin is in the older style config
// then also write to the old legacy unnamed location.
if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil {
return err
}
if opts.legacy {
if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil {
return err
}
}
return nil
}
func writeEtagToStore(opts *ActivateOpts, name, etag string) error {
if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil {
return err
}
return nil
}
func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool) error {
params := storage.WriteParams
params.Context = txnCtx
for name, b := range bundles {
if len(b.Raw) == 0 {
// Write data from each new bundle into the store. Only write under the
// roots contained in their manifest.
if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil {
return err
}
for _, mf := range b.Modules {
var path string
// For backwards compatibility, in legacy mode, upsert policies to
// the unprefixed path.
if legacy {
path = mf.Path
} else {
path = modulePathWithPrefix(name, mf.Path)
}
if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
return err
}
}
} else {
params.BasePaths = *b.Manifest.Roots
err := store.Truncate(ctx, txn, params, NewIterator(b.Raw))
if err != nil {
return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err)
}
}
}
return nil
}
func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error {
for _, root := range roots {
path, ok := storage.ParsePathEscaped("/" + root)
@@ -364,6 +755,43 @@ func writeData(ctx context.Context, store storage.Store, txn storage.Transaction
return nil
}
func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
m.Timer(metrics.RegoModuleCompile).Start()
defer m.Timer(metrics.RegoModuleCompile).Stop()
modules := map[string]*ast.Module{}
// preserve any modules already on the compiler
for name, module := range compiler.Modules {
modules[name] = module
}
// preserve any modules passed in from the store
for name, module := range extraModules {
modules[name] = module
}
// include all the new bundle modules
for bundleName, b := range bundles {
if legacy {
for _, mf := range b.Modules {
modules[mf.Path] = mf.Parsed
}
} else {
for name, module := range b.ParsedModules(bundleName) {
modules[name] = module
}
}
}
if compiler.Compile(modules); compiler.Failed() {
return compiler.Errors
}
return nil
}
func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
m.Timer(metrics.RegoModuleCompile).Start()
@@ -439,7 +867,7 @@ func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool)
func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error {
collisions := map[string][]string{}
allBundles, err := ReadBundleNamesFromStore(ctx, store, txn)
if err != nil && !storage.IsNotFound(err) {
if suppressNotFound(err) != nil {
return err
}
@@ -448,7 +876,7 @@ func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Trans
// Build a map of roots for existing bundles already in the system
for _, name := range allBundles {
roots, err := ReadBundleRootsFromStore(ctx, store, txn, name)
if err != nil && !storage.IsNotFound(err) {
if suppressNotFound(err) != nil {
return err
}
allRoots[name] = roots
@@ -489,6 +917,47 @@ func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Trans
return nil
}
func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error {
for _, pat := range patches {
// construct patch path
path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
if !ok {
return fmt.Errorf("error parsing patch path")
}
var op storage.PatchOp
switch pat.Op {
case "upsert":
op = storage.AddOp
_, err := store.Read(ctx, txn, path[:len(path)-1])
if err != nil {
if !storage.IsNotFound(err) {
return err
}
if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
return err
}
}
case "remove":
op = storage.RemoveOp
case "replace":
op = storage.ReplaceOp
default:
return fmt.Errorf("bad patch operation: %v", pat.Op)
}
// apply the patch
if err := store.Write(ctx, txn, op, path, pat.Value); err != nil {
return err
}
}
return nil
}
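
For reference, a delta bundle patch that the loop above would apply (a sketch; PatchOperation's fields are inferred from their use here):

package main

import "github.com/open-policy-agent/opa/bundle"

func main() {
	patches := []bundle.PatchOperation{
		// "upsert" maps to storage.AddOp; missing parents are created via storage.MakeDir.
		{Op: "upsert", Path: "/users/alice/roles", Value: []string{"admin"}},
		// "remove" maps to storage.RemoveOp.
		{Op: "remove", Path: "/users/bob"},
	}
	_ = patches
}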
// Helpers for the older single (unnamed) bundle style manifest storage.
// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.

vendor/github.com/open-policy-agent/opa/bundle/verify.go generated vendored Normal file

@@ -0,0 +1,231 @@
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
// Package bundle provides helpers that assist in the bundle signature verification process
package bundle
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
"github.com/open-policy-agent/opa/internal/jwx/jws"
"github.com/open-policy-agent/opa/internal/jwx/jws/verify"
"github.com/open-policy-agent/opa/util"
)
const defaultVerifierID = "_default"
var verifiers map[string]Verifier
// Verifier is the interface expected for implementations that verify bundle signatures.
type Verifier interface {
VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
}
// VerifyBundleSignature will retrieve the Verifier implementation based
// on the Plugin specified in SignaturesConfig, and call its implementation
// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature
// using the given public keys or secret. If a signature is verified, it keeps
// track of the files specified in the JWT payload
func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
// default implementation does not return a nil for map, so don't
// do it here either
files := make(map[string]FileInfo)
var plugin string
// for backwards compatibility, check if there is no plugin specified, and use default
if sc.Plugin == "" {
plugin = defaultVerifierID
} else {
plugin = sc.Plugin
}
verifier, err := GetVerifier(plugin)
if err != nil {
return files, err
}
return verifier.VerifyBundleSignature(sc, bvc)
}
// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
// the JWT signature using a locally-accessible public key.
type DefaultVerifier struct{}
// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
// If a signature is verified, it keeps track of the files specified in the JWT payload
func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
files := make(map[string]FileInfo)
if len(sc.Signatures) == 0 {
return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
}
if len(sc.Signatures) > 1 {
return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
}
for _, token := range sc.Signatures {
payload, err := verifyJWTSignature(token, bvc)
if err != nil {
return files, err
}
for _, file := range payload.Files {
files[file.Name] = file
}
}
return files, nil
}
func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
// decode JWT to check if the header specifies the key to use and/or if claims have the scope.
parts, err := jws.SplitCompact(token)
if err != nil {
return nil, err
}
var decodedHeader []byte
if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err)
}
var hdr jws.StandardHeaders
if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
return nil, fmt.Errorf("failed to parse JWT headers: %w", err)
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, err
}
var ds DecodedSignature
if err := json.Unmarshal(payload, &ds); err != nil {
return nil, err
}
// check for the id of the key to use for JWT signature verification
// first in the OPA config. If not found, then check the JWT kid.
keyID := bvc.KeyID
if keyID == "" {
keyID = hdr.KeyID
}
if keyID == "" {
// If header has no key id, check the deprecated key claim.
keyID = ds.KeyID
}
if keyID == "" {
return nil, fmt.Errorf("verification key ID is empty")
}
// now that we have the keyID, fetch the actual key
keyConfig, err := bvc.GetPublicKey(keyID)
if err != nil {
return nil, err
}
// verify JWT signature
alg := jwa.SignatureAlgorithm(keyConfig.Algorithm)
key, err := verify.GetSigningKey(keyConfig.Key, alg)
if err != nil {
return nil, err
}
_, err = jws.Verify([]byte(token), alg, key)
if err != nil {
return nil, err
}
// verify the scope
scope := bvc.Scope
if scope == "" {
scope = keyConfig.Scope
}
if ds.Scope != scope {
return nil, fmt.Errorf("scope mismatch")
}
return &ds, nil
}
// VerifyBundleFile verifies that the hash of a file in the bundle matches the one provided in the bundle's signature
func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
var file FileInfo
var ok bool
if file, ok = files[path]; !ok {
return fmt.Errorf("file %v not included in bundle signature", path)
}
if file.Algorithm == "" {
return fmt.Errorf("no hashing algorithm provided for file %v", path)
}
hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
if err != nil {
return err
}
// hash the file content
// For unstructured files, hash the byte stream of the file
// For structured files, read the byte stream and parse into a JSON structure;
// then recursively order the fields of all objects alphabetically and then apply
// the hash function to result to compute the hash. This ensures that the digital signature is
// independent of whitespace and other non-semantic JSON features.
var value interface{}
if IsStructuredDoc(path) {
err := util.Unmarshal(data.Bytes(), &value)
if err != nil {
return err
}
} else {
value = data.Bytes()
}
bs, err := hash.HashFile(value)
if err != nil {
return err
}
// compare file hash with same file in the JWT payloads
fb, err := hex.DecodeString(file.Hash)
if err != nil {
return err
}
if !bytes.Equal(fb, bs) {
return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
}
delete(files, path)
return nil
}
// GetVerifier returns the Verifier registered under the given id
func GetVerifier(id string) (Verifier, error) {
verifier, ok := verifiers[id]
if !ok {
return nil, fmt.Errorf("no verifier exists under id %s", id)
}
return verifier, nil
}
// RegisterVerifier registers a Verifier under the given id
func RegisterVerifier(id string, v Verifier) error {
if id == defaultVerifierID {
return fmt.Errorf("verifier id %s is reserved, use a different id", id)
}
verifiers[id] = v
return nil
}
func init() {
verifiers = map[string]Verifier{
defaultVerifierID: &DefaultVerifier{},
}
}
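
Putting the two halves together, a consumer verifies a signed bundle roughly as follows (a sketch; the SignaturesConfig would be decoded from the bundle's .signatures.json):

package verifyexample

import (
	"bytes"

	"github.com/open-policy-agent/opa/bundle"
)

// VerifySignedFile sketches the two-step flow: check the JWT from
// .signatures.json, then check one file's digest against the signed list.
func VerifySignedFile(sc bundle.SignaturesConfig, vc *bundle.VerificationConfig, path string, content bytes.Buffer) error {
	files, err := bundle.VerifyBundleSignature(sc, vc) // signed file list from the JWT payload
	if err != nil {
		return err
	}
	// Verified entries are deleted from files; anything left over at the end
	// was signed but never seen, which callers should treat as an error.
	return bundle.VerifyBundleFile(path, content, files)
}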