feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
KubeSphere CI Bot
2024-09-06 11:05:52 +08:00
committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions

View File

@@ -21,9 +21,28 @@ import (
"github.com/open-policy-agent/opa/ast/internal/scanner"
"github.com/open-policy-agent/opa/ast/internal/tokens"
astJSON "github.com/open-policy-agent/opa/ast/json"
"github.com/open-policy-agent/opa/ast/location"
)
// RegoV1CompatibleRef is the reference of the `rego.v1` import path.
var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")}

// RegoVersion defines the Rego syntax requirements for a module.
type RegoVersion int

const (
	// RegoV0 is the default, original Rego syntax.
	RegoV0 RegoVersion = iota
	// RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module).
	// In short, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported.
	RegoV0CompatV1
	// RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.:
	// future.keywords part of default keyword set, and don't require imports;
	// 'if' and 'contains' required in rule heads;
	// (some) strict checks on by default.
	RegoV1
)
// Note: This state is kept isolated from the parser so that we
// can do efficient shallow copies of these values when doing a
// save() and restore().
@@ -84,7 +103,7 @@ func (c parsedTermCache) String() string {
s.WriteRune('{')
var e *parsedTermCacheItem
for e = c.m; e != nil; e = e.next {
fmt.Fprintf(&s, "%v", e)
s.WriteString(fmt.Sprintf("%v", e))
}
s.WriteRune('}')
return s.String()
@@ -96,14 +115,23 @@ func (e *parsedTermCacheItem) String() string {
// ParserOptions defines the options for parsing Rego statements.
type ParserOptions struct {
Capabilities *Capabilities
ProcessAnnotation bool
AllFutureKeywords bool
FutureKeywords []string
SkipRules bool
Capabilities *Capabilities
ProcessAnnotation bool
AllFutureKeywords bool
FutureKeywords []string
SkipRules bool
JSONOptions *astJSON.Options
// RegoVersion is the version of Rego to parse for.
RegoVersion RegoVersion
unreleasedKeywords bool // TODO(sr): cleanup
}
// EffectiveRegoVersion returns the effective RegoVersion to use for parsing.
// It is a plain accessor for the RegoVersion field.
//
// Deprecated: Use RegoVersion instead.
func (po *ParserOptions) EffectiveRegoVersion() RegoVersion {
	return po.RegoVersion
}
// NewParser creates and initializes a Parser.
func NewParser() *Parser {
p := &Parser{
@@ -177,6 +205,18 @@ func (p *Parser) WithSkipRules(skip bool) *Parser {
return p
}
// WithJSONOptions sets the Options which will be set on nodes to configure
// their JSON marshaling behavior. The options are stored on the parser and
// applied to parsed statements after a successful Parse. Returns the parser
// to allow chaining.
func (p *Parser) WithJSONOptions(jsonOptions *astJSON.Options) *Parser {
	p.po.JSONOptions = jsonOptions
	return p
}
// WithRegoVersion sets the Rego syntax version the parser will enforce.
// Returns the parser to allow chaining.
func (p *Parser) WithRegoVersion(version RegoVersion) *Parser {
	p.po.RegoVersion = version
	return p
}
func (p *Parser) parsedTermCacheLookup() (*Term, *state) {
l := p.s.loc.Offset
// stop comparing once the cached offsets are lower than l
@@ -245,16 +285,23 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
allowedFutureKeywords := map[string]tokens.Token{}
for _, kw := range p.po.Capabilities.FutureKeywords {
var ok bool
allowedFutureKeywords[kw], ok = futureKeywords[kw]
if !ok {
return nil, nil, Errors{
&Error{
Code: ParseErr,
Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
Location: nil,
},
if p.po.RegoVersion == RegoV1 {
// RegoV1 includes all future keywords in the default language definition
for k, v := range futureKeywords {
allowedFutureKeywords[k] = v
}
} else {
for _, kw := range p.po.Capabilities.FutureKeywords {
var ok bool
allowedFutureKeywords[kw], ok = futureKeywords[kw]
if !ok {
return nil, nil, Errors{
&Error{
Code: ParseErr,
Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
Location: nil,
},
}
}
}
}
@@ -272,7 +319,7 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
}
selected := map[string]tokens.Token{}
if p.po.AllFutureKeywords {
if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 {
for kw, tok := range allowedFutureKeywords {
selected[kw] = tok
}
@@ -293,6 +340,12 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
}
p.s.s = p.s.s.WithKeywords(selected)
if p.po.RegoVersion == RegoV1 {
for kw, tok := range allowedFutureKeywords {
p.s.s.AddKeyword(kw, tok)
}
}
// read the first token to initialize the parser
p.scan()
@@ -319,9 +372,14 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
s = p.save()
if imp := p.parseImport(); imp != nil {
if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
p.regoV1Import(imp)
}
if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) {
p.futureImport(imp, allowedFutureKeywords)
}
stmts = append(stmts, imp)
continue
} else if len(p.s.errors) > 0 {
@@ -357,6 +415,19 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
stmts = p.parseAnnotations(stmts)
}
if p.po.JSONOptions != nil {
for i := range stmts {
vis := NewGenericVisitor(func(x interface{}) bool {
if x, ok := x.(customJSON); ok {
x.setJSONOptions(*p.po.JSONOptions)
}
return false
})
vis.Walk(stmts[i])
}
}
return stmts, p.s.comments, p.s.errors
}
@@ -511,9 +582,9 @@ func (p *Parser) parseImport() *Import {
path := imp.Path.Value.(Ref)
if !RootDocumentNames.Contains(path[0]) && !FutureRootDocument.Equal(path[0]) {
if !RootDocumentNames.Contains(path[0]) && !FutureRootDocument.Equal(path[0]) && !RegoRootDocument.Equal(path[0]) {
p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v",
RootDocumentNames.Union(NewSet(FutureRootDocument)),
RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)),
path[0])
return nil
}
@@ -559,26 +630,32 @@ func (p *Parser) parseRules() []*Rule {
return nil
}
if usesContains {
rule.Head.keywords = append(rule.Head.keywords, tokens.Contains)
}
if rule.Default {
if !p.validateDefaultRuleValue(&rule) {
return nil
}
if len(rule.Head.Args) > 0 {
if !p.validateDefaultRuleArgs(&rule) {
return nil
}
}
rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
return []*Rule{&rule}
}
if usesContains && !rule.Head.Reference.IsGround() {
p.error(p.s.Loc(), "multi-value rules need ground refs")
return nil
}
// back-compat with `p[x] { ... }``
hasIf := p.s.tok == tokens.If
// p[x] if ... becomes a single-value rule p[x]
if hasIf && !usesContains && len(rule.Head.Ref()) == 2 {
if rule.Head.Value == nil {
rule.Head.generatedValue = true
rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location)
} else {
// p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat
@@ -607,6 +684,7 @@ func (p *Parser) parseRules() []*Rule {
switch {
case hasIf:
rule.Head.keywords = append(rule.Head.keywords, tokens.If)
p.scan()
s := p.save()
if expr := p.parseLiteral(); expr != nil {
@@ -638,6 +716,7 @@ func (p *Parser) parseRules() []*Rule {
case usesContains:
rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
rule.generatedBody = true
return []*Rule{&rule}
default:
@@ -645,7 +724,7 @@ func (p *Parser) parseRules() []*Rule {
}
if p.s.tok == tokens.Else {
if r := rule.Head.Ref(); len(r) > 1 && !r[len(r)-1].Value.IsGround() {
if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() {
p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head")
return nil
}
@@ -687,6 +766,7 @@ func (p *Parser) parseRules() []*Rule {
// rule's head AST but have their location
// set to the rule body.
next.Head = rule.Head.Copy()
next.Head.keywords = rule.Head.keywords
for i := range next.Head.Args {
if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
next.Head.Args[i].Value = Var(p.genwildcard())
@@ -706,6 +786,7 @@ func (p *Parser) parseElse(head *Head) *Rule {
rule.SetLoc(p.s.Loc())
rule.Head = head.Copy()
rule.Head.generatedValue = false
for i := range rule.Head.Args {
if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() {
rule.Head.Args[i].Value = Var(p.genwildcard())
@@ -721,6 +802,7 @@ func (p *Parser) parseElse(head *Head) *Rule {
switch p.s.tok {
case tokens.LBrace, tokens.If: // no value, but a body follows directly
rule.Head.generatedValue = true
rule.Head.Value = BooleanTerm(true)
case tokens.Assign, tokens.Unify:
rule.Head.Assign = tokens.Assign == p.s.tok
@@ -736,42 +818,37 @@ func (p *Parser) parseElse(head *Head) *Rule {
}
hasIf := p.s.tok == tokens.If
hasLBrace := p.s.tok == tokens.LBrace
if hasIf {
p.scan()
s := p.save()
if expr := p.parseLiteral(); expr != nil {
// NOTE(sr): set literals are never false or undefined, so parsing this as
// p if false else if { true }
// ^^^^^^^^ set of one element, `true`
// isn't valid.
isSetLiteral := false
if t, ok := expr.Terms.(*Term); ok {
_, isSetLiteral = t.Value.(Set)
}
// expr.Term is []*Term or Every
if !isSetLiteral {
rule.Body.Append(expr)
setLocRecursive(rule.Body, rule.Location)
return &rule
}
}
p.restore(s)
}
if p.s.tok != tokens.LBrace {
if !hasIf && !hasLBrace {
rule.Body = NewBody(NewExpr(BooleanTerm(true)))
rule.generatedBody = true
setLocRecursive(rule.Body, rule.Location)
return &rule
}
p.scan()
if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
return nil
if hasIf {
rule.Head.keywords = append(rule.Head.keywords, tokens.If)
p.scan()
}
p.scan()
if p.s.tok == tokens.LBrace {
p.scan()
if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
return nil
}
p.scan()
} else if p.s.tok != tokens.EOF {
expr := p.parseLiteral()
if expr == nil {
return nil
}
rule.Body.Append(expr)
setLocRecursive(rule.Body, rule.Location)
} else {
p.illegal("rule body expected")
return nil
}
if p.s.tok == tokens.Else {
if rule.Else = p.parseElse(head); rule.Else == nil {
@@ -782,7 +859,6 @@ func (p *Parser) parseElse(head *Head) *Rule {
}
func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
head := &Head{}
loc := p.s.Loc()
defer func() {
@@ -805,7 +881,9 @@ func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
switch x := ref.Value.(type) {
case Var:
head = NewHead(x)
// Modify the code to add the location to the head ref
// and set the head ref's jsonOptions.
head = VarHead(x, ref.Location, p.po.JSONOptions)
case Ref:
head = RefHead(x)
case Call:
@@ -872,6 +950,7 @@ func (p *Parser) parseHead(defaultRule bool) (*Head, bool) {
if head.Value == nil && head.Key == nil {
if len(head.Ref()) != 2 || len(head.Args) > 0 {
head.generatedValue = true
head.Value = BooleanTerm(true).SetLocation(head.Location)
}
}
@@ -891,7 +970,6 @@ func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body {
}
for {
expr := p.parseLiteral()
if expr == nil {
return nil
@@ -1952,7 +2030,7 @@ func (p *Parser) error(loc *location.Location, reason string) {
func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) {
msg := strings.Builder{}
fmt.Fprintf(&msg, f, a...)
msg.WriteString(fmt.Sprintf(f, a...))
switch len(p.s.hints) {
case 0: // nothing to do
@@ -2127,6 +2205,38 @@ func (p *Parser) validateDefaultRuleValue(rule *Rule) bool {
return valid
}
// validateDefaultRuleArgs checks that the arguments of a default rule are
// plain, non-repeated variables. A repeated variable or any non-variable term
// is reported as a parse error at the rule's location, and the rule is
// considered invalid.
func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool {
	ok := true
	seen := NewVarSet()

	vis := NewGenericVisitor(func(node interface{}) bool {
		switch n := node.(type) {
		case Var:
			if seen.Contains(n) {
				p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", n))
				ok = false
				return true // rule already invalid; stop descending
			}
			seen.Add(n)
		case *Term:
			if _, isVar := n.Value.(Var); !isVar {
				p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", TypeName(n.Value)))
				ok = false
				return true // rule already invalid; stop descending
			}
		}
		return false // keep walking
	})
	vis.Walk(rule.Head.Args)

	return ok
}
// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources',
// which isn't handled properly by json for some reason.
type rawAnnotation struct {
@@ -2459,6 +2569,11 @@ func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]toke
return
}
if p.s.s.RegoV1Compatible() {
p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
return
}
kwds := make([]string, 0, len(allowedFutureKeywords))
for k := range allowedFutureKeywords {
kwds = append(kwds, k)
@@ -2486,3 +2601,44 @@ func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]toke
p.s.s.AddKeyword(kw, allowedFutureKeywords[kw])
}
}
// regoV1Import handles an `import rego.v1` statement: it validates that the
// import is supported by the current capabilities, that the path is exactly
// `rego.v1` and unaliased, then marks the scanner rego.v1-compatible and
// enables all future keywords (the import implies `future.keywords`).
// All failures are reported through p.errorf; the function returns nothing.
func (p *Parser) regoV1Import(imp *Import) {
	if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) {
		p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef)
		return
	}

	if p.po.RegoVersion == RegoV1 {
		// We're parsing for Rego v1, where the 'rego.v1' import is a no-op.
		return
	}

	path := imp.Path.Value.(Ref)

	// Must be exactly `rego.v1`. The len==1 check is evaluated first so that
	// path[1] is never accessed on a single-element ref.
	if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 {
		p.errorf(imp.Path.Location, "invalid import, must be `%s`", RegoV1CompatibleRef)
		return
	}

	if imp.Alias != "" {
		p.errorf(imp.Path.Location, "`rego` imports cannot be aliased")
		return
	}

	// import all future keywords with the rego.v1 import
	kwds := make([]string, 0, len(futureKeywords))
	for k := range futureKeywords {
		kwds = append(kwds, k)
	}

	if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() {
		// We have imported future keywords, but they didn't come from another `rego.v1` import.
		p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
		return
	}

	p.s.s.SetRegoV1Compatible()
	for _, kw := range kwds {
		p.s.s.AddKeyword(kw, futureKeywords[kw])
	}
}