feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
Authored by KubeSphere CI Bot on 2024-09-06 11:05:52 +08:00, committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions


@@ -1,165 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alerting
import (
"context"
"github.com/go-logr/logr"
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promlabels "github.com/prometheus/prometheus/model/labels"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
alertingv2beta1 "kubesphere.io/api/alerting/v2beta1"
)
type ClusterRuleGroupReconciler struct {
client.Client
Log logr.Logger
}
func (r *ClusterRuleGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var (
log = r.Log
ruleLevel = RuleLevelCluster
clusterrulegroupList = alertingv2beta1.ClusterRuleGroupList{}
promruleNamespace = PrometheusRuleNamespace
)
// get all enabled clusterrulegroups
err := r.Client.List(ctx, &clusterrulegroupList, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
SourceGroupResourceLabelKeyEnable: SourceGroupResourceLabelValueEnableTrue,
}),
})
if err != nil {
return reconcile.Result{}, err
}
// add rule_id label that may have been missed
var updated bool
for i := range clusterrulegroupList.Items {
g := clusterrulegroupList.Items[i]
for j := range g.Spec.Rules {
if g.Spec.Rules[j].Labels == nil {
g.Spec.Rules[j].Labels = make(map[string]string)
}
if _, ok := g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]; !ok {
g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId] = string(uuid.NewUUID())
err = r.Client.Update(ctx, &g)
if err != nil {
return reconcile.Result{}, err
}
updated = true
}
}
}
if updated {
return reconcile.Result{}, nil
}
// labels added to rule.labels
enforceRuleLabels := map[string]string{
RuleLabelKeyRuleLevel: string(ruleLevel),
}
// matchers enforced to rule.expr
enforceRuleMatchers := []*promlabels.Matcher{}
// labels added to PrometheusRule.metadata.labels
promruleLabelSet := labels.Set{
PrometheusRuleResourceLabelKeyRuleLevel: string(ruleLevel),
}
enforceFuncs := createEnforceRuleFuncs(enforceRuleMatchers, enforceRuleLabels)
// make PrometheusRule Groups
rulegroups, err := makePrometheusRuleGroups(log, &clusterrulegroupList, enforceFuncs...)
if err != nil {
return reconcile.Result{}, err
}
if len(rulegroups) == 0 {
err = r.Client.DeleteAllOf(ctx, &promresourcesv1.PrometheusRule{}, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
},
})
return reconcile.Result{}, err
}
// make desired PrometheusRule resources
desired, err := makePrometheusRuleResources(rulegroups, promruleNamespace, PrometheusRulePrefixClusterLevel, promruleLabelSet, nil)
if err != nil {
return reconcile.Result{}, err
}
// get current PrometheusRules
var current promresourcesv1.PrometheusRuleList
err = r.Client.List(ctx, &current, &client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
})
if err != nil {
return reconcile.Result{}, err
}
// update relevant prometheusrule resources
err = bulkUpdatePrometheusRuleResources(r.Client, ctx, current.Items, desired)
if err != nil && (apierrors.IsConflict(err) || apierrors.IsAlreadyExists(err)) {
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, err
}
func (r *ClusterRuleGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Log.GetSink() == nil {
r.Log = mgr.GetLogger()
}
if r.Client == nil {
r.Client = mgr.GetClient()
}
ctr, err := controller.New("clusterrulegroup", mgr,
controller.Options{
Reconciler: r,
})
if err != nil {
return err
}
err = ctr.Watch(
&source.Kind{Type: &alertingv2beta1.ClusterRuleGroup{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Namespace: PrometheusRuleNamespace,
},
}}
}))
return err
}


@@ -1,161 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alerting
import (
"context"
"github.com/go-logr/logr"
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
alertingv2beta1 "kubesphere.io/api/alerting/v2beta1"
)
type GlobalRuleGroupReconciler struct {
client.Client
Log logr.Logger
}
func (r *GlobalRuleGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var (
log = r.Log
ruleLevel = RuleLevelGlobal
globalrulegroupList = alertingv2beta1.GlobalRuleGroupList{}
promruleNamespace = PrometheusRuleNamespace
)
// get all enabled globalrulegroups
err := r.Client.List(ctx, &globalrulegroupList, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
SourceGroupResourceLabelKeyEnable: SourceGroupResourceLabelValueEnableTrue,
}),
})
if err != nil {
return reconcile.Result{}, err
}
// add rule_id label that may have been missed
var updated bool
for i := range globalrulegroupList.Items {
g := globalrulegroupList.Items[i]
for j := range g.Spec.Rules {
if g.Spec.Rules[j].Labels == nil {
g.Spec.Rules[j].Labels = make(map[string]string)
}
if _, ok := g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]; !ok {
g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId] = string(uuid.NewUUID())
err = r.Client.Update(ctx, &g)
if err != nil {
return reconcile.Result{}, err
}
updated = true
}
}
}
if updated {
return reconcile.Result{}, nil
}
// labels added to rule.labels
enforceRuleLabels := map[string]string{
RuleLabelKeyRuleLevel: string(ruleLevel),
}
// labels added to PrometheusRule.metadata.labels
promruleLabelSet := labels.Set{
PrometheusRuleResourceLabelKeyRuleLevel: string(ruleLevel),
}
enforceFuncs := createEnforceRuleFuncs(nil, enforceRuleLabels)
// make PrometheusRule Groups
rulegroups, err := makePrometheusRuleGroups(log, &globalrulegroupList, enforceFuncs...)
if err != nil {
return reconcile.Result{}, err
}
if len(rulegroups) == 0 {
err = r.Client.DeleteAllOf(ctx, &promresourcesv1.PrometheusRule{}, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
},
})
return reconcile.Result{}, err
}
// make desired PrometheusRule resources
desired, err := makePrometheusRuleResources(rulegroups, promruleNamespace, PrometheusRulePrefixGlobalLevel, promruleLabelSet, nil)
if err != nil {
return reconcile.Result{}, err
}
// get current PrometheusRules
var current promresourcesv1.PrometheusRuleList
err = r.Client.List(ctx, &current, &client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
})
if err != nil {
return reconcile.Result{}, err
}
err = bulkUpdatePrometheusRuleResources(r.Client, ctx, current.Items, desired)
if err != nil && (apierrors.IsConflict(err) || apierrors.IsAlreadyExists(err)) {
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, err
}
func (r *GlobalRuleGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Log.GetSink() == nil {
r.Log = mgr.GetLogger()
}
if r.Client == nil {
r.Client = mgr.GetClient()
}
ctr, err := controller.New("globalrulegroup", mgr,
controller.Options{
Reconciler: r,
})
if err != nil {
return err
}
err = ctr.Watch(
&source.Kind{Type: &alertingv2beta1.GlobalRuleGroup{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Namespace: PrometheusRuleNamespace,
},
}}
}))
return err
}


@@ -1,194 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alerting
import (
"context"
"fmt"
"github.com/go-logr/logr"
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promlabels "github.com/prometheus/prometheus/model/labels"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
alertingv2beta1 "kubesphere.io/api/alerting/v2beta1"
)
type RuleGroupReconciler struct {
client.Client
Log logr.Logger
}
func (r *RuleGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
var (
log = r.Log.WithValues("namespace", req.Namespace)
ruleLevel = RuleLevelNamespace
rulegroupNamespace = req.Namespace
rulegroupList = alertingv2beta1.RuleGroupList{}
promruleNamespace = PrometheusRuleNamespace
)
// get all enabled rulegroups
err := r.Client.List(ctx, &rulegroupList, &client.ListOptions{
Namespace: rulegroupNamespace,
LabelSelector: labels.SelectorFromSet(labels.Set{
SourceGroupResourceLabelKeyEnable: SourceGroupResourceLabelValueEnableTrue,
}),
})
if err != nil {
return reconcile.Result{}, err
}
// add rule_id label that may have been missed
var updated bool
for i := range rulegroupList.Items {
g := rulegroupList.Items[i]
for j := range g.Spec.Rules {
if g.Spec.Rules[j].Labels == nil {
g.Spec.Rules[j].Labels = make(map[string]string)
}
if _, ok := g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]; !ok {
g.Spec.Rules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId] = string(uuid.NewUUID())
err = r.Client.Update(ctx, &g)
if err != nil {
return reconcile.Result{}, err
}
updated = true
}
}
}
if updated {
return reconcile.Result{}, nil
}
// labels added to rule.labels
enforceRuleLabels := map[string]string{
RuleLabelKeyRuleLevel: string(ruleLevel),
RuleLabelKeyNamespace: rulegroupNamespace,
}
// matchers enforced to rule.expr
enforceRuleMatchers := []*promlabels.Matcher{{
Type: promlabels.MatchEqual,
Name: RuleLabelKeyNamespace,
Value: rulegroupNamespace,
}}
// labels added to PrometheusRule.metadata.labels
promruleLabelSet := labels.Set{
PrometheusRuleResourceLabelKeyRuleLevel: string(ruleLevel),
PrometheusRuleResourceLabelKeyOwnerNamespace: rulegroupNamespace,
}
enforceFuncs := createEnforceRuleFuncs(enforceRuleMatchers, enforceRuleLabels)
// make PrometheusRule Groups
rulegroups, err := makePrometheusRuleGroups(log, &rulegroupList, enforceFuncs...)
if err != nil {
return reconcile.Result{}, err
}
if len(rulegroups) == 0 {
err = r.Client.DeleteAllOf(ctx, &promresourcesv1.PrometheusRule{}, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
},
})
return reconcile.Result{}, err
}
var ns corev1.Namespace
err = r.Client.Get(ctx, types.NamespacedName{Name: rulegroupNamespace}, &ns)
if err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if !ns.DeletionTimestamp.IsZero() {
// if the namespace is being deleted, ignore it: the PrometheusRules owned by
// the namespace will be garbage collected by k8s.
return reconcile.Result{}, nil
}
ownerReferences := []metav1.OwnerReference{{
APIVersion: ns.APIVersion,
Kind: ns.Kind,
Name: ns.Name,
UID: ns.UID,
}}
// make desired PrometheusRule resources
namePrefix := fmt.Sprintf("%s%s-", PrometheusRulePrefixNamespaceLevel, rulegroupNamespace)
desired, err := makePrometheusRuleResources(rulegroups, promruleNamespace, namePrefix, promruleLabelSet, ownerReferences)
if err != nil {
return reconcile.Result{}, err
}
// get current PrometheusRules
var current promresourcesv1.PrometheusRuleList
err = r.Client.List(ctx, &current, &client.ListOptions{
Namespace: promruleNamespace,
LabelSelector: labels.SelectorFromSet(promruleLabelSet),
})
if err != nil {
return reconcile.Result{}, err
}
// update relevant prometheusrule resources
err = bulkUpdatePrometheusRuleResources(r.Client, ctx, current.Items, desired)
if err != nil && (apierrors.IsConflict(err) || apierrors.IsAlreadyExists(err)) {
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, err
}
func (r *RuleGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Log.GetSink() == nil {
r.Log = mgr.GetLogger()
}
if r.Client == nil {
r.Client = mgr.GetClient()
}
ctr, err := controller.New("rulegroup", mgr,
controller.Options{
Reconciler: r,
})
if err != nil {
return err
}
err = ctr.Watch(
&source.Kind{Type: &alertingv2beta1.RuleGroup{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Namespace: o.GetNamespace(),
},
}}
}))
return err
}


@@ -1,428 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alerting
import (
"context"
"reflect"
"sort"
"strconv"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"github.com/prometheus-community/prom-label-proxy/injectproxy"
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
promlabels "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
alertingv2beta1 "kubesphere.io/api/alerting/v2beta1"
"kubesphere.io/kubesphere/pkg/constants"
)
const (
RuleLevelNamespace RuleLevel = "namespace"
RuleLevelCluster RuleLevel = "cluster"
RuleLevelGlobal RuleLevel = "global"
RuleTypeTemplate RuleType = "template" // for template rule configured by exprBuilder to build expression
RuleTypeCustom RuleType = "custom" // for custom rule configured by direct expression
// for rule.labels
RuleLabelKeyRuleLevel = "rule_level"
RuleLabelKeyRuleGroup = "rule_group"
RuleLabelKeyRuleType = "rule_type"
RuleLabelKeyCluster = "cluster"
RuleLabelKeyNamespace = "namespace"
RuleLabelKeySeverity = "severity"
RuleLabelKeyAlertType = "alerttype"
RuleLabelValueAlertTypeMetric = "metric"
// label keys in RuleGroup/ClusterRuleGroup/GlobalRuleGroup.metadata.labels
SourceGroupResourceLabelKeyEnable = "alerting.kubesphere.io/enable"
SourceGroupResourceLabelValueEnableTrue = "true"
SourceGroupResourceLabelValueEnableFalse = "false"
// for PrometheusRule.metadata.labels
PrometheusRuleResourceLabelKeyOwnerNamespace = "alerting.kubesphere.io/owner_namespace"
PrometheusRuleResourceLabelKeyOwnerCluster = "alerting.kubesphere.io/owner_cluster"
PrometheusRuleResourceLabelKeyRuleLevel = "alerting.kubesphere.io/rule_level"
PrometheusRuleResourceLabelKeyBuiltin = "alerting.kubesphere.io/builtin"
PrometheusRuleResourceLabelValueBuiltinTrue = "true"
PrometheusRuleResourceLabelValueBuiltinFalse = "false"
// name prefix for PrometheusRule
PrometheusRulePrefix = "alertrules-"
PrometheusRulePrefixNamespaceLevel = PrometheusRulePrefix + "ns-"
PrometheusRulePrefixClusterLevel = PrometheusRulePrefix + "cl-"
PrometheusRulePrefixGlobalLevel = PrometheusRulePrefix + "gl-"
PrometheusRuleNamespace = constants.KubeSphereMonitoringNamespace
)
type RuleLevel string
type RuleType string
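// corev1.MaxSecretSize is 1MiB; cap the marshaled rule data of each generated
// PrometheusRule at half of that (see makePrometheusRuleSpecs below).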
var maxConfigMapDataSize = int(float64(corev1.MaxSecretSize) * 0.5)
type enforceRuleFunc func(rule *promresourcesv1.Rule) error
type EnforceExprFunc func(expr string) (string, error)
var emptyEnforceExprFunc = func(expr string) (string, error) {
return expr, nil
}
func CreateEnforceExprFunc(enforceRuleMatchers []*promlabels.Matcher) EnforceExprFunc {
if len(enforceRuleMatchers) > 0 {
enforcer := injectproxy.NewEnforcer(false, enforceRuleMatchers...)
return func(expr string) (string, error) {
parsedExpr, err := parser.ParseExpr(expr)
if err != nil {
return expr, err
}
if err := enforcer.EnforceNode(parsedExpr); err != nil {
return expr, err
}
return parsedExpr.String(), nil
}
}
return emptyEnforceExprFunc
}
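// A rough sketch of the enforcement (assuming prom-label-proxy's usual
// behavior of injecting each matcher into every vector selector of the
// expression):
//
// enforce := CreateEnforceExprFunc([]*promlabels.Matcher{
// {Type: promlabels.MatchEqual, Name: "namespace", Value: "default"},
// })
// expr, _ := enforce(`sum(rate(container_cpu_usage_seconds_total[5m])) by (pod)`)
// // expr: sum by (pod) (rate(container_cpu_usage_seconds_total{namespace="default"}[5m]))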
func createEnforceRuleFuncs(enforceRuleMatchers []*promlabels.Matcher, enforceRuleLabels map[string]string) []enforceRuleFunc {
var enforceFuncs []enforceRuleFunc
// enforce func for rule.expr
if len(enforceRuleMatchers) > 0 {
enforceExprFunc := CreateEnforceExprFunc(enforceRuleMatchers)
enforceFuncs = append(enforceFuncs, func(rule *promresourcesv1.Rule) error {
expr, err := enforceExprFunc(rule.Expr.String())
if err != nil {
return err
}
rule.Expr = intstr.FromString(expr)
return nil
})
}
// enforce func for rule.labels
if len(enforceRuleLabels) > 0 {
enforceFuncs = append(enforceFuncs, func(rule *promresourcesv1.Rule) error {
if rule.Labels == nil {
rule.Labels = make(map[string]string)
}
for n, v := range enforceRuleLabels {
rule.Labels[n] = v
}
return nil
})
}
return enforceFuncs
}
func makePrometheusRuleGroups(log logr.Logger, groupList client.ObjectList,
commonEnforceFuncs ...enforceRuleFunc) ([]*promresourcesv1.RuleGroup, error) {
var rulegroups []*promresourcesv1.RuleGroup
convertRule := func(rule *alertingv2beta1.Rule, groupName string, enforceFuncs ...enforceRuleFunc) (*promresourcesv1.Rule, error) {
if rule.Disable { // ignore disabled rules
return nil, nil
}
rule = rule.DeepCopy()
if rule.Labels == nil {
rule.Labels = make(map[string]string)
}
if rule.Severity != "" {
rule.Labels[RuleLabelKeySeverity] = string(rule.Severity)
}
prule := promresourcesv1.Rule{
Alert: rule.Alert,
For: promresourcesv1.Duration(rule.For),
Expr: rule.Expr,
Labels: rule.Labels,
Annotations: rule.Annotations,
}
enforceFuncs = append(enforceFuncs, commonEnforceFuncs...)
// enforce rule group label and alert type label
enforceFuncs = append(enforceFuncs, func(rule *promresourcesv1.Rule) error {
if rule.Labels == nil {
rule.Labels = make(map[string]string)
}
rule.Labels[RuleLabelKeyRuleGroup] = groupName
rule.Labels[RuleLabelKeyAlertType] = RuleLabelValueAlertTypeMetric
return nil
})
for _, f := range enforceFuncs {
if f == nil {
continue
}
err := f(&prule)
if err != nil {
return nil, errors.Wrapf(err, "alert: %s", rule.Alert)
}
}
return &prule, nil
}
switch list := groupList.(type) {
case *alertingv2beta1.RuleGroupList:
for _, group := range list.Items {
var prules []promresourcesv1.Rule
for _, rule := range group.Spec.Rules {
prule, err := convertRule(&rule.Rule, group.Name)
if err != nil {
log.WithValues("rulegroup", group.Namespace+"/"+group.Name).Error(err, "failed to convert")
continue
}
if prule != nil {
if rule.ExprBuilder != nil && rule.ExprBuilder.Workload != nil {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeTemplate)
} else {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeCustom)
}
prules = append(prules, *prule)
}
}
if len(prules) == 0 {
continue
}
rulegroups = append(rulegroups, &promresourcesv1.RuleGroup{
Name: group.Name,
Interval: promresourcesv1.Duration(group.Spec.Interval),
PartialResponseStrategy: group.Spec.PartialResponseStrategy,
Rules: prules,
})
}
case *alertingv2beta1.ClusterRuleGroupList:
for _, group := range list.Items {
var prules []promresourcesv1.Rule
for _, rule := range group.Spec.Rules {
prule, err := convertRule(&rule.Rule, group.Name)
if err != nil {
log.WithValues("clusterrulegroup", group.Name).Error(err, "failed to convert")
continue
}
if prule != nil {
if rule.ExprBuilder != nil && rule.ExprBuilder.Node != nil {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeTemplate)
} else {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeCustom)
}
prules = append(prules, *prule)
}
}
if len(prules) == 0 {
continue
}
rulegroups = append(rulegroups, &promresourcesv1.RuleGroup{
Name: group.Name,
Interval: promresourcesv1.Duration(group.Spec.Interval),
PartialResponseStrategy: group.Spec.PartialResponseStrategy,
Rules: prules,
})
}
case *alertingv2beta1.GlobalRuleGroupList:
for _, group := range list.Items {
var prules []promresourcesv1.Rule
for _, rule := range group.Spec.Rules {
prule, err := convertRule(&rule.Rule, group.Name,
createEnforceRuleFuncs(ParseGlobalRuleEnforceMatchers(&rule), nil)...)
if err != nil {
log.WithValues("globalrulegroup", group.Name).Error(err, "failed to convert")
continue
}
if prule != nil {
if rule.ExprBuilder != nil && (rule.ExprBuilder.Node != nil || rule.ExprBuilder.Workload != nil) {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeTemplate)
} else {
prule.Labels[RuleLabelKeyRuleType] = string(RuleTypeCustom)
}
prules = append(prules, *prule)
}
}
if len(prules) == 0 {
continue
}
rulegroups = append(rulegroups, &promresourcesv1.RuleGroup{
Name: group.Name,
Interval: promresourcesv1.Duration(group.Spec.Interval),
PartialResponseStrategy: group.Spec.PartialResponseStrategy,
Rules: prules,
})
}
}
return rulegroups, nil
}
func ParseGlobalRuleEnforceMatchers(rule *alertingv2beta1.GlobalRule) []*promlabels.Matcher {
var enforceRuleMatchers []*promlabels.Matcher
if rule.ClusterSelector != nil {
matcher := rule.ClusterSelector.ParseToMatcher(RuleLabelKeyCluster)
if matcher != nil {
enforceRuleMatchers = append(enforceRuleMatchers, matcher)
}
}
if rule.NamespaceSelector != nil {
matcher := rule.NamespaceSelector.ParseToMatcher(RuleLabelKeyNamespace)
if matcher != nil {
enforceRuleMatchers = append(enforceRuleMatchers, matcher)
}
}
return enforceRuleMatchers
}
func makePrometheusRuleResources(rulegroups []*promresourcesv1.RuleGroup, namespace, namePrefix string,
labels map[string]string, ownerReferences []metav1.OwnerReference) ([]*promresourcesv1.PrometheusRule, error) {
promruleSpecs, err := makePrometheusRuleSpecs(rulegroups)
if err != nil {
return nil, err
}
var ps = make([]*promresourcesv1.PrometheusRule, len(promruleSpecs))
for i := range promruleSpecs {
ps[i] = &promresourcesv1.PrometheusRule{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: namePrefix + strconv.Itoa(i),
Labels: labels,
OwnerReferences: ownerReferences,
},
Spec: *promruleSpecs[i],
}
}
return ps, nil
}
type rulegroupsWrapper struct {
rulegroups []*promresourcesv1.RuleGroup
by func(g1, g2 *promresourcesv1.RuleGroup) bool
}
func (w rulegroupsWrapper) Len() int {
return len(w.rulegroups)
}
func (w rulegroupsWrapper) Swap(i, j int) {
w.rulegroups[i], w.rulegroups[j] = w.rulegroups[j], w.rulegroups[i]
}
func (w rulegroupsWrapper) Less(i, j int) bool {
return w.by(w.rulegroups[i], w.rulegroups[j])
}
func makePrometheusRuleSpecs(rulegroups []*promresourcesv1.RuleGroup) ([]*promresourcesv1.PrometheusRuleSpec, error) {
sort.Sort(rulegroupsWrapper{
rulegroups: rulegroups,
by: func(g1, g2 *promresourcesv1.RuleGroup) bool {
return g1.Name < g2.Name
},
})
var (
pSpecs []*promresourcesv1.PrometheusRuleSpec
pSpec = &promresourcesv1.PrometheusRuleSpec{}
size int
)
for i := range rulegroups {
rulegroup := rulegroups[i]
content, err := yaml.Marshal(rulegroup)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal content")
}
contentLen := len(string(content))
size += contentLen
if size > maxConfigMapDataSize*80/100 && len(pSpec.Groups) > 0 { // leave space for enforcing possible label matchers into expr
pSpecs = append(pSpecs, pSpec)
// reinit
size = contentLen
pSpec = &promresourcesv1.PrometheusRuleSpec{}
}
pSpec.Groups = append(pSpec.Groups, *rulegroup)
}
if len(pSpec.Groups) > 0 {
pSpecs = append(pSpecs, pSpec)
}
return pSpecs, nil
}
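// A worked sketch of the packing above: corev1.MaxSecretSize is 1MiB, so
// maxConfigMapDataSize is 524288 bytes and the per-spec budget is 80% of that
// (419430 bytes). Groups are sorted by name and their marshaled YAML sizes are
// accumulated; whenever adding a group would push the running size past the
// budget, the current spec is flushed and that group starts a new one, so the
// same input always yields the same deterministic set of PrometheusRule specs.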
func bulkUpdatePrometheusRuleResources(client client.Client, ctx context.Context, current, desired []*promresourcesv1.PrometheusRule) error {
var (
currentMap = make(map[string]*promresourcesv1.PrometheusRule)
desiredMap = make(map[string]*promresourcesv1.PrometheusRule)
err error
)
for i := range current {
promrule := current[i]
currentMap[promrule.Namespace+"/"+promrule.Name] = promrule
}
for i := range desired {
promrule := desired[i]
desiredMap[promrule.Namespace+"/"+promrule.Name] = promrule
}
// update if exists in current PrometheusRules, or create
for name, desired := range desiredMap {
if current, ok := currentMap[name]; ok {
if !reflect.DeepEqual(current.Spec, desired.Spec) ||
!reflect.DeepEqual(current.Labels, desired.Labels) ||
!reflect.DeepEqual(current.OwnerReferences, desired.OwnerReferences) {
desired.SetResourceVersion(current.ResourceVersion)
err = client.Update(ctx, desired)
if err != nil {
return err
}
}
} else {
err = client.Create(ctx, desired)
if err != nil {
return err
}
}
}
// delete if not in desired PrometheusRules
for name, current := range currentMap {
if _, ok := desiredMap[name]; !ok {
err = client.Delete(ctx, current)
if err != nil {
if apierrors.IsNotFound(err) {
continue
}
return err
}
}
}
return nil
}


@@ -0,0 +1,134 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"strings"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
categoryController = "app-category"
categoryFinalizer = "categories.application.kubesphere.io"
)
var _ reconcile.Reconciler = &AppCategoryReconciler{}
var _ kscontroller.Controller = &AppCategoryReconciler{}
type AppCategoryReconciler struct {
client.Client
}
func (r *AppCategoryReconciler) Name() string {
return categoryController
}
func (r *AppCategoryReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewControllerManagedBy(mgr).
Named(categoryController).
For(&appv2.Category{}).
Watches(
&appv2.Application{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request {
var requests []reconcile.Request
app := object.(*appv2.Application)
if categoryID := app.Labels[appv2.AppCategoryNameKey]; categoryID != "" {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{Name: categoryID},
})
}
return requests
}),
builder.WithPredicates(predicate.LabelChangedPredicate{}),
).
Complete(r)
}
func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(4).Infof("reconcile app category %s", req.String())
category := &appv2.Category{}
if err := r.Client.Get(ctx, req.NamespacedName, category); err != nil {
if errors.IsNotFound(err) {
if req.Name == appv2.UncategorizedCategoryID {
return reconcile.Result{}, r.ensureUncategorizedCategory()
}
// ignore exceptions caused by incorrectly adding app labels.
klog.Errorf("not found %s, check if you added the correct app category", req.String())
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
if !controllerutil.ContainsFinalizer(category, categoryFinalizer) {
controllerutil.AddFinalizer(category, categoryFinalizer)
return ctrl.Result{}, r.Update(ctx, category)
}
if !category.ObjectMeta.DeletionTimestamp.IsZero() {
// our finalizer is present, so handle the external dependency: refuse to
// delete a category that still owns applications; otherwise remove our
// finalizer from the list and update it.
if category.Status.Total > 0 {
klog.Errorf("can not delete helm category: %s which owns applications", req.String())
return reconcile.Result{}, nil
}
controllerutil.RemoveFinalizer(category, categoryFinalizer)
return reconcile.Result{}, r.Update(ctx, category)
}
apps := &appv2.ApplicationList{}
opts := client.MatchingLabels{
appv2.AppCategoryNameKey: category.Name,
appv2.RepoIDLabelKey: appv2.UploadRepoKey,
}
if err := r.List(ctx, apps, opts); err != nil {
klog.Errorf("failed to list apps: %v", err)
return ctrl.Result{}, err
}
if category.Status.Total != len(apps.Items) {
category.Status.Total = len(apps.Items)
if err := r.Status().Update(ctx, category); err != nil {
klog.Errorf("failed to update category status: %v", err)
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
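// ensureUncategorizedCategory recreates the built-in "uncategorized" category
// if it is missing, so applications without an explicit category always have
// somewhere to be counted.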
func (r *AppCategoryReconciler) ensureUncategorizedCategory() error {
ctg := &appv2.Category{}
err := r.Get(context.TODO(), types.NamespacedName{Name: appv2.UncategorizedCategoryID}, ctg)
if err == nil {
// the category already exists, nothing to create
return nil
}
if !errors.IsNotFound(err) {
klog.Errorf("failed to get uncategorized category: %v", err)
return err
}
ctg.Name = appv2.UncategorizedCategoryID
return r.Create(context.TODO(), ctg)
}


@@ -1,263 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/application/api/v1beta1"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
)
const (
applicationName = "bookinfo"
serviceName = "productpage"
timeout = time.Second * 30
interval = time.Second * 2
)
var replicas = int32(2)
var _ = Context("Inside of a new namespace", func() {
ctx := context.TODO()
ns := SetupTest(ctx)
Describe("Application", func() {
applicationLabels := map[string]string{
"app.kubernetes.io/name": "bookinfo",
"app.kubernetes.io/version": "1",
}
BeforeEach(func() {
By("create deployment,service,application objects")
service := newService(serviceName, ns.Name, applicationLabels)
deployments := []*v1.Deployment{newDeployments(serviceName, ns.Name, applicationLabels, "v1")}
app := newApplication(applicationName, ns.Name, applicationLabels)
Expect(k8sClient.Create(ctx, service.DeepCopy())).Should(Succeed())
for i := range deployments {
deployment := deployments[i]
Expect(k8sClient.Create(ctx, deployment.DeepCopy())).Should(Succeed())
}
Expect(k8sClient.Create(ctx, app)).Should(Succeed())
})
Context("Application Controller", func() {
It("Should not reconcile application", func() {
By("update application labels")
application := &v1beta1.Application{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
Expect(err).Should(Succeed())
updateApplication := func(object interface{}) {
newApp := object.(*v1beta1.Application)
newApp.Labels["kubesphere.io/creator"] = ""
}
updated, err := updateWithRetries(k8sClient, ctx, application.Namespace, applicationName, updateApplication, 1*time.Second, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
Expect(updated).Should(BeTrue())
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
// application status field should not be populated with selected deployments and services
return len(application.Status.ComponentList.Objects) == 0
}, timeout, interval).Should(BeTrue())
})
It("Should reconcile application successfully", func() {
By("check if application status been updated by controller")
application := &v1beta1.Application{}
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
Expect(err).Should(Succeed())
// application status field should be populated by controller
return len(application.Status.ComponentList.Objects) > 0
}, timeout, interval).Should(BeTrue())
})
})
})
})
type UpdateObjectFunc func(obj interface{})
func updateWithRetries(client client.Client, ctx context.Context, namespace, name string, updateFunc UpdateObjectFunc, interval, timeout time.Duration) (bool, error) {
var updateErr error
pollErr := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
app := &v1beta1.Application{}
if err = client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, app); err != nil {
return false, err
}
updateFunc(app)
if err = client.Update(ctx, app); err == nil {
return true, nil
}
updateErr = err
return false, nil
})
if pollErr != nil {
if pollErr == wait.ErrWaitTimeout {
// surface the last update error instead of the bare timeout
pollErr = fmt.Errorf("couldn't apply the provided update to object %q: %v", name, updateErr)
}
return false, pollErr
}
return true, nil
}
func newDeployments(deploymentName, namespace string, labels map[string]string, version string) *v1.Deployment {
labels["app"] = deploymentName
labels["version"] = version
deployment := &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", deploymentName, version),
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
},
Spec: v1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c1",
Image: "nginx:latest",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
ContainerPort: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
ContainerPort: 3306,
Protocol: corev1.ProtocolTCP,
},
},
},
},
},
},
},
Status: v1.DeploymentStatus{
AvailableReplicas: replicas,
ReadyReplicas: replicas,
Replicas: replicas,
},
}
return deployment
}
func newService(serviceName, namespace string, labels map[string]string) *corev1.Service {
labels["app"] = serviceName
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{
"servicemesh.kubesphere.io/enabled": "true",
},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
Port: 3306,
Protocol: corev1.ProtocolTCP,
},
},
Selector: labels,
Type: corev1.ServiceTypeClusterIP,
},
Status: corev1.ServiceStatus{},
}
return svc
}
func newApplication(applicationName, namespace string, labels map[string]string) *v1beta1.Application {
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: applicationName,
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
},
Spec: v1beta1.ApplicationSpec{
ComponentGroupKinds: []metav1.GroupKind{
{
Group: "",
Kind: "Service",
},
{
Group: "apps",
Kind: "Deployment",
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
AddOwnerRef: true,
},
}
return app
}


@@ -1,150 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"math/rand"
"path/filepath"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestApplicationController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Application Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "ks-core", "crds")},
AttachControlPlaneOutput: false,
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = appv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
// SetupTest will setup a testing environment.
// This includes:
// - creating a Namespace to be used during the test
// - starting application controller
// - stopping application controller after the test ends
//
// Call this function at the start of each of your tests.
func SetupTest(ctx context.Context) *corev1.Namespace {
var cancel context.CancelFunc
ns := &corev1.Namespace{}
BeforeEach(func() {
*ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
}
err := k8sClient.Create(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to create a test namespace")
mgr, err := ctrl.NewManager(cfg, ctrl.Options{MetricsBindAddress: "0"})
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
selector, _ := labels.Parse("app.kubernetes.io/name,!kubesphere.io/creator")
reconciler := &ApplicationReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Mapper: mgr.GetRESTMapper(),
ApplicationSelector: selector,
}
err = reconciler.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup application reconciler")
var mgrCtx context.Context
mgrCtx, cancel = context.WithCancel(context.Background())
go func() {
defer GinkgoRecover()
err := mgr.Start(mgrCtx)
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
}()
})
AfterEach(func() {
cancel() // stop the application controller started in BeforeEach
err := k8sClient.Delete(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
})
return ns
}
func init() {
rand.Seed(time.Now().UnixNano())
}
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
func randStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}


@@ -0,0 +1,396 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"k8s.io/client-go/rest"
batchv1 "k8s.io/api/batch/v1"
"kubesphere.io/utils/helm"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"kubesphere.io/api/constants"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
helmrelease "helm.sh/helm/v3/pkg/release"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/kubesphere/pkg/simple/client/application"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
helminstallerController = "apprelease-helminstaller"
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var _ controller.Controller = &AppReleaseReconciler{}
var _ reconcile.Reconciler = &AppReleaseReconciler{}
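// recheck intervals below are in seconds; they are multiplied by time.Second
// at the call sites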
const (
verificationAgain = 5
timeoutVerificationAgain = 600
timeoutMaxRecheck = 4
)
func (r *AppReleaseReconciler) Name() string {
return helminstallerController
}
func (r *AppReleaseReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
r.HelmExecutorOptions = mgr.HelmExecutorOptions
r.Client = mgr.GetClient()
clusterClientSet, err := clusterclient.NewClusterClientSet(mgr.GetCache())
if err != nil {
return fmt.Errorf("failed to create cluster client set: %v", err)
}
r.clusterClientSet = clusterClientSet
if r.HelmExecutorOptions == nil || r.HelmExecutorOptions.Image == "" {
return fmt.Errorf("helm executor options is nil or image is empty")
}
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).Named(helminstallerController).
Watches(
&clusterv1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.mapper),
builder.WithPredicates(ClusterDeletePredicate{}),
).
WithEventFilter(IgnoreAnnotationChangePredicate{AnnotationKey: appv2.TimeoutRecheck}).
For(&appv2.ApplicationRelease{}).Complete(r)
}
func (r *AppReleaseReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
cluster := o.(*clusterv1alpha1.Cluster)
klog.Infof("cluster %s has been deleted", cluster.Name)
apprlsList := &appv2.ApplicationReleaseList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.ClusterNameLabelKey: cluster.Name})}
if err := r.List(ctx, apprlsList, opts); err != nil {
klog.Errorf("failed to list application releases: %v", err)
return requests
}
for _, apprls := range apprlsList.Items {
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: apprls.Name}})
}
return requests
}
type AppReleaseReconciler struct {
client.Client
clusterClientSet clusterclient.Interface
HelmExecutorOptions *kscontroller.HelmExecutorOptions
ossStore s3.Interface
cmStore s3.Interface
}
func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
apprls := &appv2.ApplicationRelease{}
if err := r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
timeoutRecheck := apprls.Annotations[appv2.TimeoutRecheck]
var reCheck int
if timeoutRecheck == "" {
reCheck = 0
} else {
reCheck, _ = strconv.Atoi(timeoutRecheck)
}
dstKubeConfig, runClient, err := r.getClusterInfo(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster info: %v", err)
return ctrl.Result{}, err
}
executor, err := r.getExecutor(apprls, dstKubeConfig, runClient)
if err != nil {
klog.Errorf("failed to get executor: %v", err)
return ctrl.Result{}, err
}
cluster, err := r.clusterClientSet.Get(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster: %v", err)
return ctrl.Result{}, err
}
helmKubeConfig, err := application.GetHelmKubeConfig(ctx, cluster, runClient)
if err != nil {
klog.Errorf("failed to get helm kubeconfig: %v", err)
return ctrl.Result{}, err
}
// errors from the lookups above have already been handled, so only the
// cluster's deletion state needs to be checked here
if !cluster.DeletionTimestamp.IsZero() {
klog.Errorf("cluster %s is being deleted", cluster.Name)
apprls.Status.State = appv2.StatusClusterDeleted
apprls.Status.Message = fmt.Sprintf("cluster %s has been deleted", cluster.Name)
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(apprls, HelmReleaseFinalizer) && apprls.ObjectMeta.DeletionTimestamp.IsZero() {
expected := apprls.DeepCopy()
controllerutil.AddFinalizer(expected, HelmReleaseFinalizer)
klog.Infof("add finalizer for apprelease %s", apprls.Name)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(apprls))
}
if !apprls.ObjectMeta.DeletionTimestamp.IsZero() {
if apprls.Status.State != appv2.StatusDeleting {
result, err := r.removeAll(ctx, apprls, executor, helmKubeConfig)
if err != nil {
return result, err
}
}
wait, err := r.cleanJob(ctx, apprls, runClient)
if err != nil {
klog.Errorf("failed to clean job: %v", err)
return ctrl.Result{}, err
}
if wait {
klog.Infof("job wait, job for %s is still active", apprls.Name)
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
klog.Infof("job for %s has been cleaned", apprls.Name)
if err = r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
apprls.Finalizers = nil
err = r.Update(ctx, apprls)
if err != nil {
klog.Errorf("failed to remove finalizer for apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("remove finalizer for apprelease %s", apprls.Name)
return ctrl.Result{}, nil
}
if apprls.Status.State == "" {
apprls.Status.SpecHash = apprls.HashSpec()
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusCreating)
}
if apprls.HashSpec() != apprls.Status.SpecHash {
apprls.Status.SpecHash = apprls.HashSpec()
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusUpgrading)
}
if apprls.Status.State == appv2.StatusCreated || apprls.Status.State == appv2.StatusTimeout {
options := []helm.HelmOption{
helm.SetNamespace(apprls.GetRlsNamespace()),
helm.SetKubeconfig(dstKubeConfig),
}
release, err := executor.Get(ctx, apprls.Name, options...)
if err != nil && err.Error() == "release: not found" {
klog.Infof("helm release %s/%s not created yet,check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName)
job := &batchv1.Job{}
if err = runClient.Get(ctx, types.NamespacedName{Namespace: apprls.GetRlsNamespace(), Name: apprls.Status.InstallJobName}, job); err != nil {
if apierrors.IsNotFound(err) {
klog.Errorf("job %s not found", apprls.Status.InstallJobName)
msg := "deploy failed, job not found"
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
return ctrl.Result{}, err
}
if job.Status.Failed > 0 {
klog.Infof("install apprls %s job %s , failed times %d/%d", apprls.Name, job.Name, job.Status.Failed, *job.Spec.BackoffLimit+1)
}
if job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit {
msg := fmt.Sprintf("deploy failed, job %s has failed %d times ", apprls.Status.InstallJobName, job.Status.Failed)
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
if err != nil {
msg := fmt.Sprintf("%s helm create job failed err: %v", apprls.Name, err)
err = r.updateStatus(ctx, apprls, appv2.StatusFailed, msg)
return ctrl.Result{}, err
}
switch release.Info.Status {
case helmrelease.StatusFailed:
if strings.Contains(release.Info.Description, "context deadline exceeded") && reCheck < timeoutMaxRecheck {
if apprls.Status.State != appv2.StatusTimeout {
err = r.updateStatus(ctx, apprls, appv2.StatusTimeout, "install time out")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("install time out, will check status again after %d second", timeoutVerificationAgain)
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}
deployed, err := application.UpdateHelmStatus(dstKubeConfig, release)
if err != nil {
return ctrl.Result{}, err
}
if apprls.Annotations == nil {
apprls.Annotations = map[string]string{}
}
apprls.Annotations[appv2.TimeoutRecheck] = strconv.Itoa(reCheck + 1)
patch, _ := json.Marshal(apprls)
err = r.Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("update recheck times %s for %s", strconv.Itoa(reCheck+1), apprls.Name)
if deployed {
err = r.updateStatus(ctx, apprls, appv2.StatusActive, "StatusActive")
if err != nil {
klog.Errorf("failed to update apprelease %s %v", apprls.Name, err)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}
err = r.updateStatus(ctx, apprls, appv2.StatusFailed, release.Info.Description)
return ctrl.Result{}, err
case helmrelease.StatusDeployed:
err = r.updateStatus(ctx, apprls, appv2.StatusActive)
return ctrl.Result{}, err
default:
klog.V(5).Infof("helm release %s/%s status %s, check again after %d second", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain)
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
}
if apprls.Status.State == appv2.StatusCreating || apprls.Status.State == appv2.StatusUpgrading {
return ctrl.Result{}, r.createOrUpgradeAppRelease(ctx, apprls, executor, helmKubeConfig)
}
return ctrl.Result{}, nil
}
func (r *AppReleaseReconciler) removeAll(ctx context.Context, apprls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (ctrl.Result, error) {
err = r.updateStatus(ctx, apprls, appv2.StatusDeleting)
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
return ctrl.Result{}, err
}
uninstallJobName, err := r.uninstall(ctx, apprls, executor, kubeconfig)
if err != nil {
klog.Errorf("failed to uninstall helm release %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
err = r.cleanStore(ctx, apprls)
if err != nil {
klog.Errorf("failed to clean store: %v", err)
return ctrl.Result{}, err
}
klog.Infof("remove apprelease %s success", apprls.Name)
if uninstallJobName != "" {
klog.Infof("try to update uninstall apprls job name %s to apprelease %s", uninstallJobName, apprls.Name)
apprls.Status.UninstallJobName = uninstallJobName
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("update uninstall apprls job name %s to apprelease %s success", uninstallJobName, apprls.Name)
}
return ctrl.Result{}, nil
}
func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprls *appv2.ApplicationRelease) (*dynamic.DynamicClient, error) {
clusterClient, err := r.clusterClientSet.GetClusterClient(clusterName)
if err != nil {
klog.Errorf("failed to get cluster client: %v", err)
return nil, err
}
creator := apprls.Annotations[constants.CreatorAnnotationKey]
conf := *clusterClient.RestConfig
if creator != "" {
conf.Impersonate = rest.ImpersonationConfig{
UserName: creator,
}
}
klog.Infof("DynamicClient impersonate kubeAsUser: %s", creator)
dynamicClient, err := dynamic.NewForConfig(&conf)
return dynamicClient, err
}
func (r *AppReleaseReconciler) getClusterInfo(clusterName string) ([]byte, client.Client, error) {
cluster, err := r.clusterClientSet.Get(clusterName)
if err != nil {
return nil, nil, err
}
runtimeClient, err := r.clusterClientSet.GetRuntimeClient(clusterName)
if err != nil {
return nil, nil, err
}
return cluster.Spec.Connection.KubeConfig, runtimeClient, nil
}
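// updateStatus records the new state and message on the status subresource
// via a JSON merge patch, which does not depend on the object's resourceVersion.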
func (r *AppReleaseReconciler) updateStatus(ctx context.Context, apprls *appv2.ApplicationRelease, status string, message ...string) error {
apprls.Status.State = status
if message != nil {
apprls.Status.Message = message[0]
}
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
return r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
}


@@ -0,0 +1,110 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"strings"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"k8s.io/klog/v2"
"kubesphere.io/utils/s3"
"kubesphere.io/kubesphere/pkg/simple/client/application"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
appVersionController = "appversion-controller"
)
var _ reconcile.Reconciler = &AppVersionReconciler{}
var _ kscontroller.Controller = &AppVersionReconciler{}
type AppVersionReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
}
func (r *AppVersionReconciler) Name() string {
return appVersionController
}
func (r *AppVersionReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppVersionReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).
Named(appVersionController).
For(&appv2.ApplicationVersion{}).
Complete(r)
}
func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
appVersion := &appv2.ApplicationVersion{}
if err := r.Client.Get(ctx, req.NamespacedName, appVersion); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// Delete app files; this is best-effort cleanup and errors will not affect the main process
if !appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
err := r.deleteFile(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to clean file for appversion %s: %v", appVersion.Name, err)
}
}
return ctrl.Result{}, nil
}
func (r *AppVersionReconciler) deleteFile(ctx context.Context, appVersion *appv2.ApplicationVersion) error {
defer func() {
controllerutil.RemoveFinalizer(appVersion, appv2.StoreCleanFinalizer)
err := r.Update(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to remove finalizer from appversion %s: %v", appVersion.Name, err)
return
}
klog.Infof("Removed finalizer from appversion %s successfully", appVersion.Name)
}()
klog.Infof("ApplicationVersion %s has been deleted, try to clean file", appVersion.Name)
id := []string{appVersion.Name}
apprls := &appv2.ApplicationReleaseList{}
err := r.Client.List(ctx, apprls, client.MatchingLabels{appv2.AppVersionIDLabelKey: appVersion.Name})
if err != nil {
klog.Errorf("Failed to list ApplicationRelease: %v", err)
return err
}
if len(apprls.Items) > 0 {
klog.Infof("ApplicationVersion %s is still in use, keep file in store", appVersion.Name)
return nil
}
err = application.FailOverDelete(r.cmStore, r.ossStore, id)
if err != nil {
klog.Errorf("Fail to delete appversion %s from store: %v", appVersion.Name, err)
return err
}
klog.Infof("Delete file %s from store successfully", appVersion.Name)
return nil
}
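For context, a finalizer removed here is normally added while the object is still live, so that deletion blocks until the store cleanup has run. A minimal sketch of that counterpart under the same types (ensureStoreCleanFinalizer is hypothetical; this PR may add the finalizer elsewhere):

func ensureStoreCleanFinalizer(ctx context.Context, c client.Client, appVersion *appv2.ApplicationVersion) error {
	if !appVersion.DeletionTimestamp.IsZero() {
		// already terminating; nothing to add
		return nil
	}
	// AddFinalizer reports true only when it actually appended the entry
	if controllerutil.AddFinalizer(appVersion, appv2.StoreCleanFinalizer) {
		return c.Update(ctx, appVersion)
	}
	return nil
}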

View File

@@ -0,0 +1,31 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
type ClusterDeletePredicate struct {
predicate.Funcs
}
func (ClusterDeletePredicate) Update(e event.UpdateEvent) bool {
return false
}
func (ClusterDeletePredicate) Create(_ event.CreateEvent) bool {
return false
}
func (ClusterDeletePredicate) Delete(_ event.DeleteEvent) bool {
return true
}
func (ClusterDeletePredicate) Generic(_ event.GenericEvent) bool {
return false
}
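ClusterDeletePredicate passes only delete events through, so a reconciler watching clusters with it is triggered exclusively by cluster removal. A sketch of how such a predicate is typically attached (the wiring is illustrative, not taken from this change):

return ctrl.NewControllerManagedBy(mgr).
	For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(ClusterDeletePredicate{})).
	Complete(r)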

View File

@@ -0,0 +1,213 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
batchv1 "k8s.io/api/batch/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/utils/helm"
"sigs.k8s.io/controller-runtime/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)
func (r *AppReleaseReconciler) uninstall(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (jobName string, err error) {
klog.Infof("uninstall helm release %s", rls.Name)
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
options := []helm.HelmOption{
helm.SetNamespace(rls.GetRlsNamespace()),
helm.SetKubeconfig(kubeconfig),
}
if jobName, err = executor.Uninstall(ctx, rls.Name, options...); err != nil {
klog.Error(err, "failed to force delete helm release")
return jobName, err
}
klog.Infof("uninstall helm release %s success,job name: %s", rls.Name, jobName)
return jobName, nil
}
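// jobStatus interprets the executor Job's state: completed when
// Status.Succeeded reaches Spec.Completions or any pod has succeeded;
// failed when Status.Failed exceeds Spec.BackoffLimit or any pod has
// failed. Executors here are created with BackoffLimit=0, so a single
// pod failure marks the Job as failed.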
func (r *AppReleaseReconciler) jobStatus(job *batchv1.Job) (active, completed, failed bool) {
active = job.Status.Active > 0
completed = (job.Spec.Completions != nil && job.Status.Succeeded >= *job.Spec.Completions) || job.Status.Succeeded > 0
failed = (job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit) || job.Status.Failed > 0
return
}
func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) error {
clusterName := rls.GetRlsCluster()
namespace := rls.GetRlsNamespace()
klog.Infof("begin to create or upgrade %s app release %s in cluster %s ns: %s", rls.Spec.AppType, rls.Name, clusterName, namespace)
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
options := []helm.HelmOption{
helm.SetInstall(true),
helm.SetNamespace(namespace),
helm.SetKubeAsUser(creator),
helm.SetKubeconfig(kubeconfig),
}
if rls.Spec.AppType == appv2.AppTypeHelm {
_, err := executor.Get(ctx, rls.Name, options...)
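// assumes the executor surfaces Helm's driver.ErrReleaseNotFound message ("release: not found") verbatim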
if err != nil && err.Error() == "release: not found" {
klog.Infof("release %s not found, begin to create", rls.Name)
}
if err == nil {
klog.Infof("release %s found, begin to upgrade status", rls.Name)
return r.updateStatus(ctx, rls, appv2.StatusCreated)
}
}
data, err := application.FailOverGet(r.cmStore, r.ossStore, rls.Spec.AppVersionID, r.Client, true)
if err != nil {
klog.Errorf("failed to get app version data, err: %v", err)
return err
}
options = append(options, helm.SetChartData(data))
if rls.Status.InstallJobName, err = executor.Upgrade(ctx, rls.Name, "", rls.Spec.Values, options...); err != nil {
klog.Errorf("failed to create executor job, err: %v", err)
return r.updateStatus(ctx, rls, appv2.StatusFailed, err.Error())
}
return r.updateStatus(ctx, rls, appv2.StatusCreated)
}
func (r *AppReleaseReconciler) getExecutor(apprls *appv2.ApplicationRelease, kubeConfig []byte, runClient client.Client) (executor helm.Executor, err error) {
if apprls.Spec.AppType == appv2.AppTypeHelm {
return r.getHelmExecutor(apprls, kubeConfig)
}
return r.getYamlInstaller(runClient, apprls)
}
func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls *appv2.ApplicationRelease) (executor helm.Executor, err error) {
dynamicClient, err := r.getClusterDynamicClient(apprls.GetRlsCluster(), apprls)
if err != nil {
klog.Errorf("failed to get dynamic client: %v", err)
return nil, err
}
jsonList, err := application.ReadYaml(apprls.Spec.Values)
if err != nil {
klog.Errorf("failed to read yaml: %v", err)
return nil, err
}
var gvrListInfo []application.InsInfo
for _, i := range jsonList {
gvr, utd, err := application.GetInfoFromBytes(i, runClient.RESTMapper())
if err != nil {
klog.Errorf("failed to get info from bytes: %v", err)
return nil, err
}
ins := application.InsInfo{
GroupVersionResource: gvr,
Name: utd.GetName(),
Namespace: utd.GetNamespace(),
}
gvrListInfo = append(gvrListInfo, ins)
}
return application.YamlInstaller{
Mapper: runClient.RESTMapper(),
DynamicCli: dynamicClient,
GvrListInfo: gvrListInfo,
Namespace: apprls.GetRlsNamespace(),
}, nil
}
func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease, kubeconfig []byte) (executor helm.Executor, err error) {
executorOptions := []helm.ExecutorOption{
helm.SetExecutorKubeConfig(kubeconfig),
helm.SetExecutorNamespace(apprls.GetRlsNamespace()),
helm.SetExecutorImage(r.HelmExecutorOptions.Image),
helm.SetExecutorBackoffLimit(0),
helm.SetExecutorLabels(labels.Set{
appv2.AppReleaseReferenceLabelKey: apprls.Name,
constants.KubeSphereManagedLabel: "true",
}),
helm.SetTTLSecondsAfterFinished(r.HelmExecutorOptions.JobTTLAfterFinished),
}
executor, err = helm.NewExecutor(executorOptions...)
if err != nil {
klog.Errorf("failed to create helm executor: %v", err)
return nil, err
}
return executor, err
}
func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client) (wait bool, err error) {
jobs := &batchv1.JobList{}
opts := []client.ListOption{client.InNamespace(apprls.GetRlsNamespace()), client.MatchingLabels{appv2.AppReleaseReferenceLabelKey: apprls.Name}}
err = runClient.List(ctx, jobs, opts...)
if err != nil {
klog.Errorf("failed to list job for %s: %v", apprls.Name, err)
return false, err
}
if len(jobs.Items) == 0 {
klog.Infof("cluster: %s namespace: %s no job found for %s", apprls.GetRlsCluster(), apprls.GetRlsNamespace(), apprls.Name)
return false, nil
}
klog.Infof("found %d jobs for %s", len(jobs.Items), apprls.Name)
for _, job := range jobs.Items {
klog.Infof("begin to clean job %s/%s", job.Namespace, job.Name)
jobActive, jobCompleted, failed := r.jobStatus(&job)
if jobActive {
klog.Infof("job %s is still active", job.Name)
return true, nil
}
if jobCompleted || failed {
deletePolicy := metav1.DeletePropagationBackground
opt := client.DeleteOptions{PropagationPolicy: &deletePolicy}
err = runClient.Delete(ctx, &job, &opt)
if err != nil {
klog.Errorf("failed to delete job %s: %v", job.Name, err)
return false, err
}
klog.Infof("job %s has been deleted", job.Name)
} else {
klog.Infof("job:%s status unknown, wait for next reconcile: %v", job.Name, job.Status)
return true, nil
}
}
klog.Infof("all job has been deleted")
return false, nil
}
func (r *AppReleaseReconciler) cleanStore(ctx context.Context, apprls *appv2.ApplicationRelease) (err error) {
name := apprls.Labels[appv2.AppVersionIDLabelKey]
appVersion := &appv2.ApplicationVersion{}
err = r.Get(ctx, client.ObjectKey{Name: name}, appVersion)
if apierrors.IsNotFound(err) {
klog.Infof("appVersion %s has been deleted, cleanup file in oss", name)
// the Get above failed, so appVersion.Name is empty; use the label value instead
if err = application.FailOverDelete(r.cmStore, r.ossStore, []string{name}); err != nil {
klog.Warningf("failed to cleanup file in oss: %v", err)
}
return nil
}
klog.Infof("appVersion %s still exists, no need to cleanup file in oss", name)
return nil
}

View File

@@ -0,0 +1,266 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
helmrepo "helm.sh/helm/v3/pkg/repo"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)
const helmRepoController = "helmrepo-controller"
var _ reconcile.Reconciler = &RepoReconciler{}
var _ kscontroller.Controller = &RepoReconciler{}
type RepoReconciler struct {
recorder record.EventRecorder
client.Client
ossStore s3.Interface
cmStore s3.Interface
}
func (r *RepoReconciler) Name() string {
return helmRepoController
}
func (r *RepoReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *RepoReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.recorder = mgr.GetEventRecorderFor(helmRepoController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&appv2.Repo{}).
Complete(r)
}
func (r *RepoReconciler) UpdateStatus(ctx context.Context, helmRepo *appv2.Repo) error {
newRepo := &appv2.Repo{}
newRepo.Name = helmRepo.Name
newRepo.Status.State = helmRepo.Status.State
newRepo.Status.LastUpdateTime = metav1.Now()
patch, _ := json.Marshal(newRepo)
err := r.Status().Patch(ctx, newRepo, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return err
}
klog.Infof("update status successfully, repo: %s", helmRepo.GetName())
return nil
}
func (r *RepoReconciler) noNeedSync(ctx context.Context, helmRepo *appv2.Repo) (bool, error) {
if helmRepo.Spec.SyncPeriod == 0 {
if helmRepo.Status.State != appv2.StatusNosync {
helmRepo.Status.State = appv2.StatusNosync
klog.Infof("no sync when SyncPeriod=0, repo: %s", helmRepo.GetName())
if err := r.UpdateStatus(ctx, helmRepo); err != nil {
klog.Errorf("update status failed, error: %s", err)
return false, err
}
}
klog.Infof("no sync when SyncPeriod=0, repo: %s", helmRepo.GetName())
return true, nil
}
passed := time.Since(helmRepo.Status.LastUpdateTime.Time).Seconds()
if helmRepo.Status.State == appv2.StatusSuccessful && passed < float64(helmRepo.Spec.SyncPeriod) {
klog.Infof("last sync time is %s, passed %f, no need to sync, repo: %s", helmRepo.Status.LastUpdateTime, passed, helmRepo.GetName())
return true, nil
}
return false, nil
}
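// filterVersions deduplicates chart versions by version string, keeping the
// most recently created entry for each version, and returns them sorted
// newest-first so the caller can truncate at appv2.MaxNumOfVersions.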
func filterVersions(versions []*helmrepo.ChartVersion) []*helmrepo.ChartVersion {
versionMap := make(map[string]*helmrepo.ChartVersion)
for _, v := range versions {
if existing, found := versionMap[v.Version]; found {
if v.Created.After(existing.Created) {
versionMap[v.Version] = v
}
} else {
versionMap[v.Version] = v
}
}
result := make([]*helmrepo.ChartVersion, 0, len(versionMap))
for _, v := range versionMap {
result = append(result, v)
}
sort.Slice(result, func(i, j int) bool {
return result[i].Created.After(result[j].Created)
})
return result
}
func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
helmRepo := &appv2.Repo{}
if err := r.Client.Get(ctx, request.NamespacedName, helmRepo); err != nil {
klog.Errorf("get helm repo failed, error: %s", err)
return reconcile.Result{}, client.IgnoreNotFound(err)
}
requeueAfter := time.Duration(helmRepo.Spec.SyncPeriod) * time.Second
noSync, err := r.noNeedSync(ctx, helmRepo)
if err != nil {
return reconcile.Result{}, err
}
if noSync {
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
helmRepo.Status.State = appv2.StatusSyncing
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return reconcile.Result{}, err
}
index, err := application.LoadRepoIndex(helmRepo.Spec.Url, helmRepo.Spec.Credential)
if err != nil {
klog.Errorf("load index failed, repo: %s, url: %s, err: %s", helmRepo.GetName(), helmRepo.Spec.Url, err)
return reconcile.Result{}, err
}
for appName, versions := range index.Entries {
if len(versions) == 0 {
klog.Infof("no version found for %s", appName)
continue
}
versions = filterVersions(versions)
if len(versions) > appv2.MaxNumOfVersions {
versions = versions[:appv2.MaxNumOfVersions]
}
vRequests, err := repoParseRequest(r.Client, versions, helmRepo, appName)
if err != nil {
klog.Errorf("parse request failed, error: %s", err)
return reconcile.Result{}, err
}
klog.Infof("found %d/%d versions for %s need to upgrade", len(vRequests), len(versions), appName)
if len(vRequests) == 0 {
continue
}
own := metav1.OwnerReference{
APIVersion: appv2.SchemeGroupVersion.String(),
Kind: "Repo",
Name: helmRepo.Name,
UID: helmRepo.UID,
}
if err = application.CreateOrUpdateApp(r.Client, vRequests, r.cmStore, r.ossStore, own); err != nil {
klog.Errorf("create or update app failed, error: %s", err)
return reconcile.Result{}, err
}
}
helmRepo.Status.State = appv2.StatusSuccessful
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return reconcile.Result{}, err
}
r.recorder.Eventf(helmRepo, corev1.EventTypeNormal, "Synced", "HelmRepo %s synced successfully", helmRepo.GetName())
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
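// repoParseRequest diffs the repo index against the ApplicationVersions
// already recorded for this repo: a version becomes an AppRequest only when
// it is new or its chart digest has changed.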
func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string) (result []application.AppRequest, err error) {
appVersionList := &appv2.ApplicationVersionList{}
opts := client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{appv2.RepoIDLabelKey: helmRepo.Name}),
}
err = cli.List(context.Background(), appVersionList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
return nil, err
}
appVersionDigestMap := make(map[string]string)
for _, i := range appVersionList.Items {
key := fmt.Sprintf("%s-%s", i.GetLabels()[appv2.AppIDLabelKey], i.Spec.VersionName)
appVersionDigestMap[key] = i.Spec.Digest
}
for _, ver := range versions {
ver.Version = application.FormatVersion(ver.Version)
shortName := application.GenerateShortNameMD5Hash(ver.Name)
key := fmt.Sprintf("%s-%s-%s", helmRepo.Name, shortName, ver.Version)
dig := appVersionDigestMap[key]
if dig == ver.Digest {
continue
} else {
klog.Infof("digest not match, key: %s, digest: %s, ver.Digest: %s", key, dig, ver.Digest)
}
vRequest := application.AppRequest{
RepoName: helmRepo.Name,
VersionName: ver.Version,
AppName: fmt.Sprintf("%s-%s", helmRepo.Name, shortName),
AliasName: appName,
OriginalName: appName,
AppHome: ver.Home,
Icon: ver.Icon,
Digest: ver.Digest,
Description: ver.Description,
Abstraction: ver.Description,
Maintainers: application.GetMaintainers(ver.Maintainers),
AppType: appv2.AppTypeHelm,
Workspace: helmRepo.GetWorkspace(),
Credential: helmRepo.Spec.Credential,
FromRepo: true,
}
url := ver.URLs[0]
schemes := []string{"https://", "http://", "s3://"}
isAbsolute := false
for _, scheme := range schemes {
if strings.HasPrefix(url, scheme) {
isAbsolute = true
break
}
}
if !isAbsolute {
// a relative chart path: join it with the repo URL
url = strings.TrimSuffix(helmRepo.Spec.Url, "/") + "/" + url
}
vRequest.PullUrl = url
result = append(result, vRequest)
}
return result, nil
}
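Chart URLs in a repo index may be absolute or relative to the repo root, which is what the scheme check near the end of repoParseRequest handles. The same logic in isolation (resolveChartURL is a hypothetical helper):

func resolveChartURL(repoURL, chartURL string) string {
	for _, scheme := range []string{"https://", "http://", "s3://"} {
		if strings.HasPrefix(chartURL, scheme) {
			// already absolute
			return chartURL
		}
	}
	return strings.TrimSuffix(repoURL, "/") + "/" + chartURL
}

For example, resolveChartURL("https://charts.example.com", "nginx-1.0.0.tgz") yields "https://charts.example.com/nginx-1.0.0.tgz".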

View File

@@ -0,0 +1,31 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"sigs.k8s.io/controller-runtime/pkg/event"
)
type IgnoreAnnotationChangePredicate struct {
AnnotationKey string
}
func (p IgnoreAnnotationChangePredicate) Create(e event.CreateEvent) bool {
return true
}
func (p IgnoreAnnotationChangePredicate) Delete(e event.DeleteEvent) bool {
return true
}
func (p IgnoreAnnotationChangePredicate) Update(e event.UpdateEvent) bool {
return e.ObjectOld.GetAnnotations()[p.AnnotationKey] == e.ObjectNew.GetAnnotations()[p.AnnotationKey]
}
func (p IgnoreAnnotationChangePredicate) Generic(e event.GenericEvent) bool {
return true
}
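With this predicate installed, update events reach the reconciler only while the watched annotation is unchanged; any update that modifies that annotation key is filtered out. A wiring sketch (the resource and key are illustrative, not taken from this change):

return ctrl.NewControllerManagedBy(mgr).
	For(&appv2.Repo{}).
	WithEventFilter(IgnoreAnnotationChangePredicate{
		AnnotationKey: "example.kubesphere.io/sync-trigger", // hypothetical key
	}).
	Complete(r)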

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package certificatesigningrequest
@@ -23,228 +12,87 @@ import (
certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
certificatesinformers "k8s.io/client-go/informers/certificates/v1"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
certificateslisters "k8s.io/client-go/listers/certificates/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a Foo is csrSynced
successSynced = "Synced"
// is csrSynced successfully
messageResourceSynced = "CertificateSigningRequest csrSynced successfully"
controllerName = "csr-controller"
controllerName = "csr"
userKubeConfigSecretNameFormat = "kubeconfig-%s"
kubeconfigFileName = "config"
privateKeyAnnotation = "kubesphere.io/private-key"
)
type Controller struct {
k8sclient kubernetes.Interface
csrInformer certificatesinformers.CertificateSigningRequestInformer
csrLister certificateslisters.CertificateSigningRequestLister
csrSynced cache.InformerSynced
cmSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
kubeconfigOperator kubeconfig.Interface
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
type Reconciler struct {
client.Client
recorder record.EventRecorder
}
func NewController(k8sClient kubernetes.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer,
configMapInformer corev1informers.ConfigMapInformer, config *rest.Config) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
k8sclient: k8sClient,
csrInformer: csrInformer,
csrLister: csrInformer.Lister(),
csrSynced: csrInformer.Informer().HasSynced,
cmSynced: configMapInformer.Informer().HasSynced,
kubeconfigOperator: kubeconfig.NewOperator(k8sClient, configMapInformer.Lister(), config),
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertificateSigningRequest"),
recorder: recorder,
}
klog.Info("Setting up event handlers")
csrInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueCertificateSigningRequest,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueCertificateSigningRequest(new)
},
DeleteFunc: ctl.enqueueCertificateSigningRequest,
})
return ctl
func (r *Reconciler) Name() string {
return controllerName
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the csrInformer factories to begin populating the csrInformer caches
klog.Info("Starting CSR controller")
// Wait for the caches to be csrSynced before starting workers
klog.Info("Waiting for csrInformer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.csrSynced, c.cmSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
return nil
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(&certificatesv1.CertificateSigningRequest{},
builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
csr := object.(*certificatesv1.CertificateSigningRequest)
return csr.Labels[constants.UsernameLabelKey] != ""
})),
).
Named(controllerName).
Complete(r)
}
func (c *Controller) enqueueCertificateSigningRequest(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the csrInformer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the reconcile, passing it the namespace/name string of the
// Foo resource to be csrSynced.
if err := c.reconcile(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully csrSynced %s:%s", "key", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
func (c *Controller) reconcile(key string) error {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// Get the CertificateSigningRequest with this name
csr, err := c.csrLister.Get(key)
if err != nil {
// The resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("csr '%s' in work queue no longer exists", key))
return nil
}
klog.Error(err)
return err
csr := &certificatesv1.CertificateSigningRequest{}
if err := r.Get(ctx, req.NamespacedName, csr); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// csr create by kubesphere auto approve
// auto-approve CSRs created by KubeSphere
if username := csr.Labels[constants.UsernameLabelKey]; username != "" {
err = c.Approve(csr)
if err != nil {
klog.Error(err)
return err
if err := r.Approve(csr); err != nil {
return ctrl.Result{}, err
}
// certificate data is not empty
if len(csr.Status.Certificate) > 0 {
err = c.UpdateKubeconfig(csr)
if err != nil {
if err := r.UpdateKubeConfig(ctx, username, csr); err != nil {
// kubeconfig not generated
klog.Error(err)
return err
return ctrl.Result{}, err
}
// release
err := c.k8sclient.CertificatesV1().CertificateSigningRequests().Delete(context.Background(), csr.Name, *metav1.NewDeleteOptions(0))
if err != nil {
klog.Error(err)
return err
if err := r.Delete(ctx, csr, &client.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)}); err != nil {
return ctrl.Result{}, err
}
}
}
c.recorder.Event(csr, corev1.EventTypeNormal, successSynced, messageResourceSynced)
return nil
r.recorder.Event(csr, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
return ctrl.Result{}, nil
}
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) Approve(csr *certificatesv1.CertificateSigningRequest) error {
func (r *Reconciler) Approve(csr *certificatesv1.CertificateSigningRequest) error {
// is approved
if len(csr.Status.Certificate) > 0 {
return nil
@@ -262,20 +110,50 @@ func (c *Controller) Approve(csr *certificatesv1.CertificateSigningRequest) erro
}
// approve csr
_, err := c.k8sclient.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.Background(), csr.Name, csr, metav1.UpdateOptions{})
if err != nil {
klog.Errorln(err)
if err := r.SubResource("approval").Update(context.Background(), csr, &client.SubResourceUpdateOptions{SubResourceBody: csr}); err != nil {
return err
}
return nil
}
func (c *Controller) UpdateKubeconfig(csr *certificatesv1.CertificateSigningRequest) error {
username := csr.Labels[constants.UsernameLabelKey]
err := c.kubeconfigOperator.UpdateKubeconfig(username, csr)
func (r *Reconciler) UpdateKubeConfig(ctx context.Context, username string, csr *certificatesv1.CertificateSigningRequest) error {
secretName := fmt.Sprintf(userKubeConfigSecretNameFormat, username)
secret := &corev1.Secret{}
if err := r.Get(ctx, types.NamespacedName{Namespace: constants.KubeSphereNamespace, Name: secretName}, secret); err != nil {
return client.IgnoreNotFound(err)
}
secret = applyCert(secret, csr)
if err := r.Update(ctx, secret); err != nil {
klog.Errorf("Failed to update secret %s: %v", secretName, err)
return err
}
return nil
}
func applyCert(secret *corev1.Secret, csr *certificatesv1.CertificateSigningRequest) *corev1.Secret {
data := secret.Data[kubeconfigFileName]
kubeconfig, err := clientcmd.Load(data)
if err != nil {
klog.Error(err)
return secret
}
return err
username := secret.Labels[constants.UsernameLabelKey]
privateKey := csr.Annotations[privateKeyAnnotation]
clientCert := csr.Status.Certificate
kubeconfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
username: {
ClientKeyData: []byte(privateKey),
ClientCertificateData: clientCert,
},
}
data, err = clientcmd.Write(*kubeconfig)
if err != nil {
return secret
}
delete(secret.Annotations, "csr")
secret.StringData = map[string]string{kubeconfigFileName: string(data)}
return secret
}
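applyCert round-trips the kubeconfig with clientcmd: Load parses the stored bytes, the AuthInfos map is replaced with the freshly signed client certificate, and Write re-serializes the result. The same round trip in isolation (setClientCert is a hypothetical helper):

func setClientCert(raw []byte, username string, key, cert []byte) ([]byte, error) {
	kubeconfig, err := clientcmd.Load(raw)
	if err != nil {
		return nil, err
	}
	// replace the user credentials with the newly signed pair
	kubeconfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
		username: {ClientKeyData: key, ClientCertificateData: cert},
	}
	return clientcmd.Write(*kubeconfig)
}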

File diff suppressed because it is too large

View File

@@ -1,17 +1,6 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster

View File

@@ -1,85 +1,88 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
v1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
)
type ValidatingHandler struct {
Client client.Client
decoder *admission.Decoder
const webhookName = "cluster-webhook"
func (v *Webhook) Name() string {
return webhookName
}
var _ admission.DecoderInjector = &ValidatingHandler{}
// InjectDecoder injects the decoder into a ValidatingHandler.
func (h *ValidatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
func (v *Webhook) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
// Handle handles admission requests.
func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
if req.Operation != v1.Update {
return admission.Allowed("")
}
var _ kscontroller.Controller = &Webhook{}
var _ admission.CustomValidator = &Webhook{}
newCluster := &clusterv1alpha1.Cluster{}
if err := h.decoder.DecodeRaw(req.Object, newCluster); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
type Webhook struct {
}
oldCluster := &clusterv1alpha1.Cluster{}
if err := h.decoder.DecodeRaw(req.OldObject, oldCluster); err != nil {
return admission.Errored(http.StatusBadRequest, err)
func (v *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
return builder.WebhookManagedBy(mgr).
For(&clusterv1alpha1.Cluster{}).
WithValidator(v).
Complete()
}
func (v *Webhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
func (v *Webhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
oldCluster, ok := oldObj.(*clusterv1alpha1.Cluster)
if !ok {
return nil, fmt.Errorf("expected a Cluster but got a %T", oldObj)
}
newCluster, ok := newObj.(*clusterv1alpha1.Cluster)
if !ok {
return nil, fmt.Errorf("expected a Cluster but got a %T", newObj)
}
// The cluster created for the first time has no status information
if oldCluster.Status.UID == "" {
return admission.Allowed("")
return nil, nil
}
clusterConfig, err := clientcmd.RESTConfigFromKubeConfig(newCluster.Spec.Connection.KubeConfig)
if err != nil {
return admission.Denied(fmt.Sprintf("failed to load cluster config for %s: %s", newCluster.Name, err))
return nil, fmt.Errorf("failed to load cluster config for %s: %s", newCluster.Name, err)
}
clusterClient, err := kubernetes.NewForConfig(clusterConfig)
if err != nil {
return admission.Denied(err.Error())
return nil, err
}
kubeSystem, err := clusterClient.CoreV1().Namespaces().Get(ctx, metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
return admission.Denied(err.Error())
return nil, err
}
if oldCluster.Status.UID != kubeSystem.UID {
return admission.Denied("this kubeconfig corresponds to a different cluster than the previous one, you need to make sure that kubeconfig is not from another cluster")
return nil, errors.New("this kubeconfig corresponds to a different cluster than the previous one, you need to make sure that kubeconfig is not from another cluster")
}
return admission.Allowed("")
return nil, nil
}
func (v *Webhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}

View File

@@ -1,65 +1,132 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"os"
"context"
"errors"
"time"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/utils/helm"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"kubesphere.io/kubesphere/pkg/config"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/hashutil"
)
func buildKubeconfigFromRestConfig(config *rest.Config) ([]byte, error) {
apiConfig := api.NewConfig()
const releaseName = "ks-core"
apiCluster := &api.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
func configChanged(cluster *clusterv1alpha1.Cluster) bool {
return hashutil.FNVString(cluster.Spec.Config) != cluster.Annotations[constants.ConfigHashAnnotation]
}
func setConfigHash(cluster *clusterv1alpha1.Cluster) {
configHash := hashutil.FNVString(cluster.Spec.Config)
if cluster.Annotations == nil {
cluster.Annotations = map[string]string{
constants.ConfigHashAnnotation: configHash,
}
} else {
cluster.Annotations[constants.ConfigHashAnnotation] = configHash
}
}
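configChanged and setConfigHash form a cheap drift detector: the FNV hash of spec.config is cached in an annotation and compared on each reconcile. A consumption sketch (syncConfigIfChanged is a hypothetical helper; the apply callback stands in for the chart re-install below):

func syncConfigIfChanged(cluster *clusterv1alpha1.Cluster, apply func() error) error {
	if !configChanged(cluster) {
		// stored hash matches spec.config; nothing to do
		return nil
	}
	if err := apply(); err != nil {
		return err
	}
	// record the new hash; the caller must persist the object afterwards
	setConfigHash(cluster)
	return nil
}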
func installKSCoreInMemberCluster(kubeConfig []byte, jwtSecret, chartPath string, chartConfig []byte) error {
helmConf, err := helm.InitHelmConf(kubeConfig, constants.KubeSphereNamespace)
if err != nil {
return err
}
// generated kubeconfig will be used by cluster federation, CAFile is not
// accepted by kubefed, so we need read CAFile
if len(apiCluster.CertificateAuthorityData) == 0 && len(config.CAFile) != 0 {
caData, err := os.ReadFile(config.CAFile)
if err != nil {
return nil, err
if chartPath == "" {
chartPath = "/var/helm-charts/ks-core"
}
chart, err := loader.Load(chartPath) // in-container chart path
if err != nil {
return err
}
// values example:
// map[string]interface{}{
// "nestedKey": map[string]interface{}{
// "simpleKey": "simpleValue",
// },
// }
values := make(map[string]interface{})
if chartConfig != nil {
if err = yaml.Unmarshal(chartConfig, &values); err != nil {
return err
}
}
// Override some necessary values
values["role"] = "member"
// disable upgrade to prevent execution of ks-upgrade
values["upgrade"] = map[string]interface{}{
"enabled": false,
}
if err = unstructured.SetNestedField(values, jwtSecret, "authentication", "issuer", "jwtSecret"); err != nil {
return err
}
helmStatus := action.NewStatus(helmConf)
if _, err = helmStatus.Run(releaseName); err != nil {
if !errors.Is(err, driver.ErrReleaseNotFound) {
return err
}
apiCluster.CertificateAuthorityData = caData
// the release not exists
install := action.NewInstall(helmConf)
install.Namespace = constants.KubeSphereNamespace
install.CreateNamespace = true
install.Wait = true
install.ReleaseName = releaseName
install.Timeout = time.Minute * 5
if _, err = install.Run(chart, values); err != nil {
return err
}
return nil
}
apiConfig.Clusters["kubernetes"] = apiCluster
apiConfig.AuthInfos["kubernetes-admin"] = &api.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
Token: config.BearerToken,
TokenFile: config.BearerTokenFile,
Username: config.Username,
Password: config.Password,
upgrade := action.NewUpgrade(helmConf)
upgrade.Namespace = constants.KubeSphereNamespace
upgrade.Install = true
upgrade.Wait = true
upgrade.Timeout = time.Minute * 5
if _, err = upgrade.Run(releaseName, chart, values); err != nil {
return err
}
apiConfig.Contexts["kubernetes-admin@kubernetes"] = &api.Context{
Cluster: "kubernetes",
AuthInfo: "kubernetes-admin",
}
apiConfig.CurrentContext = "kubernetes-admin@kubernetes"
return clientcmd.Write(*apiConfig)
return nil
}
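installKSCoreInMemberCluster follows the common Helm SDK idiom: probe with action.NewStatus and branch on driver.ErrReleaseNotFound. A condensed sketch of the same idiom, assuming an initialized *action.Configuration and imports from helm.sh/helm/v3/pkg/{action,chart,storage/driver} (installOrUpgrade is a hypothetical helper):

func installOrUpgrade(conf *action.Configuration, name string, ch *chart.Chart, values map[string]interface{}) error {
	if _, err := action.NewStatus(conf).Run(name); err != nil {
		if !errors.Is(err, driver.ErrReleaseNotFound) {
			return err
		}
		// release does not exist yet: install it
		install := action.NewInstall(conf)
		install.ReleaseName = name
		_, err = install.Run(ch, values)
		return err
	}
	// release exists: upgrade it in place
	_, err := action.NewUpgrade(conf).Run(name, ch, values)
	return err
}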
func getKubeSphereConfig(ctx context.Context, client runtimeclient.Client) (*config.Config, error) {
cm := &corev1.ConfigMap{}
if err := client.Get(ctx, types.NamespacedName{Name: constants.KubeSphereConfigName, Namespace: constants.KubeSphereNamespace}, cm); err != nil {
return nil, err
}
configData, err := config.FromConfigMap(cm)
if err != nil {
return nil, err
}
return configData, nil
}
func hasCondition(conditions []clusterv1alpha1.ClusterCondition, conditionsType clusterv1alpha1.ClusterConditionType) bool {
for _, condition := range conditions {
if condition.Type == conditionsType && condition.Status == corev1.ConditionTrue {
return true
}
}
return false
}

View File

@@ -1,854 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
kubeclient "k8s.io/client-go/kubernetes"
k8sscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
fedapis "sigs.k8s.io/kubefed/pkg/apis"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
"sigs.k8s.io/kubefed/pkg/kubefedctl/util"
"kubesphere.io/api/types/v1beta1"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1"
)
var (
// Policy rules allowing full access to resources in the cluster
// or namespace.
namespacedPolicyRules = []rbacv1.PolicyRule{
{
Verbs: []string{rbacv1.VerbAll},
APIGroups: []string{rbacv1.APIGroupAll},
Resources: []string{rbacv1.ResourceAll},
},
}
clusterPolicyRules = []rbacv1.PolicyRule{
namespacedPolicyRules[0],
{
NonResourceURLs: []string{rbacv1.NonResourceAll},
Verbs: []string{"get"},
},
}
localSchemeBuilder = runtime.SchemeBuilder{
fedapis.AddToScheme,
k8sscheme.AddToScheme,
v1beta1.AddToScheme,
}
)
const (
tokenKey = "token"
serviceAccountSecretTimeout = 30 * time.Second
kubefedManagedSelector = "kubefed.io/managed=true"
)
// joinClusterForNamespace registers a cluster with a KubeFed control
// plane. The KubeFed namespace in the joining cluster is provided by
// the joiningNamespace parameter.
func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace,
joiningNamespace, hostClusterName, joiningClusterName, secretName string, labels map[string]string,
scope apiextv1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
hostClientset, err := HostClientset(hostConfig)
if err != nil {
klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
return nil, err
}
clusterClientset, err := ClusterClientset(clusterConfig)
if err != nil {
klog.V(2).Infof("Failed to get joining cluster clientset: %v", err)
return nil, err
}
scheme := runtime.NewScheme()
localSchemeBuilder.AddToScheme(scheme)
client, err := client.New(hostConfig, client.Options{Scheme: scheme})
if err != nil {
klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
return nil, err
}
klog.V(2).Infof("Performing preflight checks.")
err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting)
if err != nil {
return nil, err
}
klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace)
_, err = createKubeFedNamespace(clusterClientset, joiningNamespace, joiningClusterName, dryRun)
if err != nil {
klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", joiningNamespace, err)
return nil, err
}
klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace)
saName, err := createAuthorizedServiceAccount(clusterClientset, joiningNamespace, joiningClusterName, hostClusterName, scope, dryRun, errorOnExisting)
if err != nil {
return nil, err
}
secret, _, err := populateSecretInHostCluster(clusterClientset, hostClientset,
saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun)
if err != nil {
klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err)
return nil, err
}
var disabledTLSValidations []fedv1b1.TLSValidation
if clusterConfig.TLSClientConfig.Insecure {
disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll)
}
kubefedCluster, err := createKubeFedCluster(clusterConfig, client, joiningClusterName, clusterConfig.Host,
secret.Name, kubefedNamespace, clusterConfig.CAData, disabledTLSValidations, labels, dryRun, errorOnExisting)
if err != nil {
klog.V(2).Infof("Failed to create federated cluster resource: %v", err)
return nil, err
}
klog.V(2).Info("Created federated cluster resource")
return kubefedCluster, nil
}
// performPreflightChecks checks that the host and joining clusters are in
// a consistent state.
func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName,
kubefedNamespace string, errorOnExisting bool) error {
// Make sure there is no existing service account in the joining cluster.
saName := util.ClusterServiceAccountName(name, hostClusterName)
_, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(context.Background(), saName, metav1.GetOptions{})
switch {
case apierrors.IsNotFound(err):
return nil
case err != nil:
return err
case errorOnExisting:
return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name)
default:
klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name)
return nil
}
}
// createKubeFedCluster creates a federated cluster resource that associates
// the cluster and secret.
func createKubeFedCluster(clusterConfig *rest.Config, client client.Client, joiningClusterName, apiEndpoint,
secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation,
labels map[string]string, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
fedCluster := &fedv1b1.KubeFedCluster{
ObjectMeta: metav1.ObjectMeta{
Namespace: kubefedNamespace,
Name: joiningClusterName,
Labels: labels,
},
Spec: fedv1b1.KubeFedClusterSpec{
APIEndpoint: apiEndpoint,
CABundle: caBundle,
SecretRef: fedv1b1.LocalSecretReference{
Name: secretName,
},
DisabledTLSValidations: disabledTLSValidations,
},
}
if dryRun {
return fedCluster, nil
}
existingFedCluster := &fedv1b1.KubeFedCluster{}
key := types.NamespacedName{Namespace: kubefedNamespace, Name: joiningClusterName}
err := client.Get(context.TODO(), key, existingFedCluster)
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err)
return nil, err
case err == nil && errorOnExisting:
return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName)
case err == nil:
if retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if err = client.Get(context.TODO(), key, existingFedCluster); err != nil {
return err
}
existingFedCluster.Spec = fedCluster.Spec
existingFedCluster.Labels = labels
return client.Update(context.TODO(), existingFedCluster)
}); retryErr != nil {
klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, err)
return nil, err
}
return existingFedCluster, nil
default:
if err = checkWorkspaces(clusterConfig, client, fedCluster); err != nil {
klog.V(2).Infof("Validate federated cluster %s failed due to %v", fedCluster.Name, err)
return nil, err
}
if err = client.Create(context.TODO(), fedCluster); err != nil {
klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err)
return nil, err
}
return fedCluster, nil
}
}
// createKubeFedNamespace creates the kubefed namespace in the cluster
// associated with clusterClientset, if it doesn't already exist.
func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace,
joiningClusterName string, dryRun bool) (*corev1.Namespace, error) {
fedNamespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: kubefedNamespace,
},
}
if dryRun {
return fedNamespace, nil
}
_, err := clusterClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, err)
return nil, err
}
if err == nil {
klog.V(2).Infof("Already existing %s namespace", kubefedNamespace)
return fedNamespace, nil
}
// Not found, so create.
_, err = clusterClientset.CoreV1().Namespaces().Create(context.Background(), fedNamespace, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, err)
return nil, err
}
return fedNamespace, nil
}
// createAuthorizedServiceAccount creates a service account and grants
// the privileges required by the KubeFed control plane to manage
// resources in the joining cluster. The name of the created service
// account is returned on success.
func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface,
namespace, joiningClusterName, hostClusterName string,
scope apiextv1.ResourceScope, dryRun, errorOnExisting bool) (string, error) {
klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName)
saName, err := createServiceAccountWithSecret(joiningClusterClientset, namespace,
joiningClusterName, hostClusterName, dryRun, errorOnExisting)
if err != nil {
klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v",
saName, joiningClusterName, err)
return "", err
}
klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName)
if scope == apiextv1.NamespaceScoped {
klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
if err != nil {
klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err)
return "", err
}
klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s",
saName, joiningClusterName)
klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName,
dryRun, errorOnExisting)
if err != nil {
klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v",
saName, joiningClusterName, err)
return "", err
}
klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s",
saName, joiningClusterName)
} else {
klog.V(2).Infof("Creating cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
if err != nil {
klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v",
saName, joiningClusterName, err)
return "", err
}
klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s",
saName, joiningClusterName)
}
return saName, nil
}
// createServiceAccountWithSecret creates a service account and secret in the cluster associated
// with clusterClientset with credentials that will be used by the host cluster
// to access its API server.
func createServiceAccountWithSecret(clusterClientset kubeclient.Interface, namespace,
joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) {
saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName)
if dryRun {
return saName, nil
}
ctx := context.Background()
sa, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Get(ctx, saName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
sa = &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: saName,
Namespace: namespace,
},
}
// We must create the sa first, then create the associated secret, and update the sa last.
// Otherwise the kube-controller-manager will delete the secret.
sa, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{})
switch {
case apierrors.IsAlreadyExists(err) && errorOnExisting:
klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
return "", err
case err != nil && !apierrors.IsAlreadyExists(err):
klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
return "", err
}
} else {
return "", err
}
}
if len(sa.Secrets) > 0 {
return saName, nil
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("%s-token-", saName),
Namespace: namespace,
Annotations: map[string]string{
corev1.ServiceAccountNameKey: saName,
},
},
Type: corev1.SecretTypeServiceAccountToken,
}
// Since Kubernetes v1.24, the kube-controller-manager no longer creates a default
// token secret for service accounts. http://kep.k8s.io/2800
// Create a default secret.
secret, err = clusterClientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
klog.V(2).Infof("Could not create secret for service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
return "", err
}
// At last, update the service account.
sa.Secrets = append(sa.Secrets, corev1.ObjectReference{Name: secret.Name})
_, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Update(ctx, sa, metav1.UpdateOptions{})
switch {
case err != nil:
klog.Infof("Could not update service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
return "", err
default:
return saName, nil
}
}
func bindingSubjects(saName, namespace string) []rbacv1.Subject {
return []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
}
}
// createClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access all resources in all namespaces in the cluster associated
// with clientset.
func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
if dryRun {
return nil
}
roleName := util.RoleName(saName)
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: clusterPolicyRules,
}
existingRole, err := clientset.RbacV1().ClusterRoles().Get(context.Background(), roleName, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v",
saName, clusterName, err)
return err
case err == nil && errorOnExisting:
return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
existingRole.Rules = role.Rules
_, err := clientset.RbacV1().ClusterRoles().Update(context.Background(), existingRole, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
default: // role was not found
_, err := clientset.RbacV1().ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
// TODO: This should limit its access to only necessary resources.
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Subjects: bindingSubjects(saName, namespace),
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: roleName,
},
}
existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(context.Background(), binding.Name, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v",
saName, clusterName, err)
return err
case err == nil && errorOnExisting:
return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
// must be deleted and recreated with the correct roleRef
if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
err = clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
if err != nil {
klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v",
saName, clusterName, err)
return err
}
_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
} else {
existingBinding.Subjects = binding.Subjects
_, err := clientset.RbacV1().ClusterRoleBindings().Update(context.Background(), existingBinding, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
default:
_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
return nil
}
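// clusterPolicyRules is referenced above but defined elsewhere in this file.
// A hypothetical value consistent with the doc comment ("access all resources
// in all namespaces") would be:
//
//	var clusterPolicyRules = []rbacv1.PolicyRule{{
//		Verbs:     []string{rbacv1.VerbAll},
//		APIGroups: []string{rbacv1.APIGroupAll},
//		Resources: []string{rbacv1.ResourceAll},
//	}}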
// createRoleAndBinding creates an RBAC role and binding
// that allows the service account identified by saName to access all
// resources in the specified namespace.
func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
if dryRun {
return nil
}
roleName := util.RoleName(saName)
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: namespacedPolicyRules,
}
existingRole, err := clientset.RbacV1().Roles(namespace).Get(context.Background(), roleName, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err)
return err
case errorOnExisting && err == nil:
return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
existingRole.Rules = role.Rules
_, err = clientset.RbacV1().Roles(namespace).Update(context.Background(), existingRole, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
default:
_, err := clientset.RbacV1().Roles(namespace).Create(context.Background(), role, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Subjects: bindingSubjects(saName, namespace),
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "Role",
Name: roleName,
},
}
existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(context.Background(), binding.Name, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v",
saName, clusterName, err)
return err
case err == nil && errorOnExisting:
return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
// must be deleted and recreated with the correct roleRef
if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
err = clientset.RbacV1().RoleBindings(namespace).Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
if err != nil {
klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v",
saName, clusterName, err)
return err
}
_, err = clientset.RbacV1().RoleBindings(namespace).Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
} else {
existingBinding.Subjects = binding.Subjects
_, err = clientset.RbacV1().RoleBindings(namespace).Update(context.Background(), existingBinding, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v",
saName, clusterName, err)
return err
}
}
default:
_, err = clientset.RbacV1().RoleBindings(namespace).Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
return nil
}
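// Why the delete-and-recreate branch above exists: roleRef is immutable, so
// an in-place change is rejected by the API server. Sketch of the failure it
// avoids (hypothetical role name, error text abbreviated):
//
//	existingBinding.RoleRef.Name = "some-other-role"
//	_, err := clientset.RbacV1().RoleBindings(namespace).
//		Update(context.Background(), existingBinding, metav1.UpdateOptions{})
//	// err: RoleBinding.roleRef: Invalid value: ...: cannot change roleRef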
// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access the health check path of the cluster.
func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
if dryRun {
return nil
}
roleName := util.HealthCheckRoleName(saName, namespace)
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbacv1.PolicyRule{
{
Verbs: []string{"Get"},
NonResourceURLs: []string{"/healthz"},
},
// The cluster client expects to be able to list nodes to retrieve zone and region details.
// TODO(marun) Consider making zone/region retrieval optional
{
Verbs: []string{"list"},
APIGroups: []string{""},
Resources: []string{"nodes"},
},
},
}
existingRole, err := clientset.RbacV1().ClusterRoles().Get(context.Background(), role.Name, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v",
saName, clusterName, err)
return err
case err == nil && errorOnExisting:
return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
existingRole.Rules = role.Rules
_, err := clientset.RbacV1().ClusterRoles().Update(context.Background(), existingRole, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
default: // role was not found
_, err := clientset.RbacV1().ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Subjects: bindingSubjects(saName, namespace),
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: roleName,
},
}
existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(context.Background(), binding.Name, metav1.GetOptions{})
switch {
case err != nil && !apierrors.IsNotFound(err):
klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v",
saName, clusterName, err)
return err
case err == nil && errorOnExisting:
return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
case err == nil:
// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
// must be deleted and recreated with the correct roleRef
if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
err = clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
if err != nil {
klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v",
saName, clusterName, err)
return err
}
_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
} else {
existingBinding.Subjects = binding.Subjects
_, err := clientset.RbacV1().ClusterRoleBindings().Update(context.Background(), existingBinding, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
default:
_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
saName, clusterName, err)
return err
}
}
return nil
}
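// For reference, the health check ClusterRole built above renders to roughly
// the following manifest (name shown is a placeholder):
//
//	apiVersion: rbac.authorization.k8s.io/v1
//	kind: ClusterRole
//	metadata:
//	  name: <health-check-role-name>
//	rules:
//	- nonResourceURLs: ["/healthz"]
//	  verbs: ["get"]
//	- apiGroups: [""]
//	  resources: ["nodes"]
//	  verbs: ["list"]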
// populateSecretInHostCluster copies the service account secret for saName
// from the cluster referenced by clusterClientset to the client referenced by
// hostClientset, putting it in a secret named secretName in the provided
// namespace.
func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface,
saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string,
dryRun bool) (*corev1.Secret, []byte, error) {
klog.V(2).Infof("Creating cluster credentials secret in host cluster")
if dryRun {
dryRunSecret := &corev1.Secret{}
dryRunSecret.Name = secretName
return dryRunSecret, nil, nil
}
// Get the secret from the joining cluster.
var secret *corev1.Secret
err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) {
sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(context.Background(), saName,
metav1.GetOptions{})
if err != nil {
return false, nil
}
for _, objReference := range sa.Secrets {
saSecretName := objReference.Name
var err error
secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(context.Background(), saSecretName, metav1.GetOptions{})
if err != nil {
return false, nil
}
if secret.Type == corev1.SecretTypeServiceAccountToken {
klog.V(2).Infof("Using secret named: %s", secret.Name)
return true, nil
}
}
return false, nil
})
if err != nil {
klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err)
return nil, nil, err
}
token, ok := secret.Data[tokenKey]
if !ok {
return nil, nil, errors.Errorf("Key %q not found in service account secret", tokenKey)
}
// Create a secret in the host cluster containing the token.
v1Secret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: hostNamespace,
},
Data: map[string][]byte{
tokenKey: token,
},
}
if secretName == "" {
v1Secret.GenerateName = joiningClusterName + "-"
} else {
v1Secret.Name = secretName
}
var v1SecretResult *corev1.Secret
_, err = hostClientset.CoreV1().Secrets(hostNamespace).Get(context.Background(), v1Secret.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Create(context.Background(), &v1Secret, metav1.CreateOptions{})
if err != nil {
klog.V(2).Infof("Could not create secret in host cluster: %v", err)
return nil, nil, err
}
return v1SecretResult, nil, nil
}
klog.V(2).Infof("Could not get secret %s in host cluster: %v", v1Secret.Name, err)
return nil, nil, err
} else {
v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Update(context.Background(), &v1Secret, metav1.UpdateOptions{})
if err != nil {
klog.V(2).Infof("Update secret %s in host cluster failed: %v", v1Secret.Name, err)
return nil, nil, err
}
}
// The CA bundle is optional, so it is not an error for it to be absent
// from the secret.
caBundle := secret.Data["ca.crt"]
klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name)
return v1SecretResult, caBundle, nil
}
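// Note: wait.PollImmediate is deprecated in recent k8s.io/apimachinery
// releases. The context-aware form already used elsewhere in this commit
// would be the drop-in replacement (sketch, same polling semantics assumed):
//
//	err := wait.PollUntilContextTimeout(context.Background(), time.Second,
//		serviceAccountSecretTimeout, true, func(ctx context.Context) (bool, error) {
//			// ... same condition body as above ...
//			return false, nil
//		})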
func checkWorkspaces(clusterConfig *rest.Config, hostClient client.Client, cluster *fedv1b1.KubeFedCluster) error {
tenantclient, err := v1alpha1.NewForConfig(clusterConfig)
if err != nil {
return err
}
workspaces, err := tenantclient.Workspaces().List(context.TODO(), metav1.ListOptions{LabelSelector: kubefedManagedSelector})
if err != nil {
return err
}
// Workspaces with the `kubefed.io/managed: true` label will be deleted if the FederatedWorkspace's Clusters don't include the cluster.
// The user needs to remove the label or delete the workspace manually.
for _, ws := range workspaces.Items {
fedWorkspace := &v1beta1.FederatedWorkspace{}
key := types.NamespacedName{Name: ws.Name}
err := hostClient.Get(context.TODO(), key, fedWorkspace)
if err != nil {
// Continue with the next workspace if this one does not exist on the host.
if apierrors.IsNotFound(err) {
continue
}
return err
}
if !containsCluster(fedWorkspace.Spec.Placement, cluster.Name) {
return errors.Errorf("workspace %s exists in target member cluster %s and conflicts with the workspace on the host", ws.Name, cluster.Name)
}
}
return nil
}
func containsCluster(placement v1beta1.GenericPlacementFields, str string) bool {
// A nil clusters list means placement is driven by the selector; we ignore the selector here and treat it as matching.
if placement.Clusters == nil {
return true
}
for _, s := range placement.Clusters {
if s.Name == str {
return true
}
}
return false
}

View File

@@ -0,0 +1,294 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"bytes"
"context"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"time"
certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
"kubesphere.io/kubesphere/pkg/utils/pkiutil"
)
func (r *Reconciler) updateKubeConfigExpirationDateCondition(
ctx context.Context, cluster *clusterv1alpha1.Cluster, clusterClient client.Client, config *rest.Config,
) error {
// Member clusters using proxy mode don't need this check; their certs are managed and renewed by tower.
if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
return nil
}
klog.V(4).Infof("sync KubeConfig expiration date for cluster %s", cluster.Name)
cert, err := parseKubeConfigCert(config)
if err != nil {
return fmt.Errorf("parseKubeConfigCert for cluster %s failed: %v", cluster.Name, err)
}
if cert == nil || cert.NotAfter.IsZero() {
// delete the KubeConfigCertExpiresInSevenDays condition if present
conditions := make([]clusterv1alpha1.ClusterCondition, 0)
for _, condition := range cluster.Status.Conditions {
if condition.Type == clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays {
continue
}
conditions = append(conditions, condition)
}
cluster.Status.Conditions = conditions
return nil
}
if time.Until(cert.NotAfter) <= 7*24*time.Hour {
if err = r.renewKubeConfig(ctx, cluster, clusterClient, config, cert); err != nil {
return err
}
}
r.updateClusterCondition(cluster, clusterv1alpha1.ClusterCondition{
Type: clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: string(clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays),
Message: cert.NotAfter.String(),
})
return nil
}
func parseKubeConfigCert(config *rest.Config) (*x509.Certificate, error) {
if config.CertData == nil {
return nil, nil
}
block, _ := pem.Decode(config.CertData)
if block == nil {
return nil, fmt.Errorf("pem.Decode failed, got empty block data")
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
return cert, nil
}
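// Minimal usage sketch for the helper above (caller context assumed):
//
//	cert, err := parseKubeConfigCert(config)
//	if err == nil && cert != nil && time.Until(cert.NotAfter) < 7*24*time.Hour {
//		// the embedded client certificate expires within a week; renew it
//	}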
func (r *Reconciler) renewKubeConfig(
ctx context.Context, cluster *clusterv1alpha1.Cluster, clusterClient client.Client, config *rest.Config, cert *x509.Certificate,
) error {
apiConfig, err := clientcmd.Load(cluster.Spec.Connection.KubeConfig)
if err != nil {
return err
}
currentContext := apiConfig.Contexts[apiConfig.CurrentContext]
username := currentContext.AuthInfo
authInfo := apiConfig.AuthInfos[username]
if authInfo.Token != "" {
return nil
}
for _, v := range cert.Subject.Organization {
// We cannot renew a client certificate for the system:masters group:
// certificatesigningrequests.certificates.k8s.io is forbidden:
// use of kubernetes.io/kube-apiserver-client signer with system:masters group is not allowed
//
// For kubeconfigs where we can't issue a certificate, we fall back to the token of the kubesphere service account.
if v == user.SystemPrivilegedGroup {
data, err := setKubeSphereSAToken(ctx, clusterClient, apiConfig, username)
if err != nil {
return err
}
cluster.Spec.Connection.KubeConfig = data
return nil
}
}
kubeconfig, err := genKubeConfig(ctx, clusterClient, config, username)
if err != nil {
return err
}
cluster.Spec.Connection.KubeConfig = kubeconfig
return nil
}
func setKubeSphereSAToken(
ctx context.Context, clusterClient client.Client, apiConfig *clientcmdapi.Config, username string,
) ([]byte, error) {
secrets := &corev1.SecretList{}
if err := clusterClient.List(ctx, secrets,
client.InNamespace(constants.KubeSphereNamespace),
client.MatchingLabels{"kubesphere.io/service-account-token": ""},
); err != nil {
return nil, err
}
var secret *corev1.Secret
for i, item := range secrets.Items {
if item.Type == corev1.SecretTypeServiceAccountToken {
secret = &secrets.Items[i]
break
}
}
if secret == nil {
return nil, fmt.Errorf("no kubesphere ServiceAccount secret found")
}
apiConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
username: {
Token: string(secret.Data["token"]),
},
}
data, err := clientcmd.Write(*apiConfig)
if err != nil {
return nil, err
}
return data, nil
}
func genKubeConfig(ctx context.Context, clusterClient client.Client, clusterConfig *rest.Config, username string) ([]byte, error) {
csrName, err := createCSR(ctx, clusterClient, username)
if err != nil {
return nil, err
}
var privateKey, clientCert []byte
if err = wait.PollUntilContextTimeout(ctx, time.Second*3, time.Minute, false, func(ctx context.Context) (bool, error) {
csr := &certificatesv1.CertificateSigningRequest{}
if err = clusterClient.Get(ctx, types.NamespacedName{Name: csrName}, csr); err != nil {
return false, err
}
if len(csr.Status.Certificate) == 0 {
return false, nil
}
privateKey = []byte(csr.Annotations[kubeconfig.PrivateKeyAnnotation])
clientCert = csr.Status.Certificate
return true, nil
}); err != nil {
return nil, err
}
var ca []byte
if len(clusterConfig.CAData) > 0 {
ca = clusterConfig.CAData
} else {
ca, err = os.ReadFile(kubeconfig.InClusterCAFilePath)
if err != nil {
klog.Errorf("Failed to read CA file: %v", err)
return nil, err
}
}
currentContext := fmt.Sprintf("%s@%s", username, kubeconfig.DefaultClusterName)
config := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Preferences: clientcmdapi.Preferences{},
Clusters: map[string]*clientcmdapi.Cluster{kubeconfig.DefaultClusterName: {
Server: clusterConfig.Host,
InsecureSkipTLSVerify: false,
CertificateAuthorityData: ca,
}},
Contexts: map[string]*clientcmdapi.Context{currentContext: {
Cluster: kubeconfig.DefaultClusterName,
AuthInfo: username,
Namespace: kubeconfig.DefaultNamespace,
}},
AuthInfos: map[string]*clientcmdapi.AuthInfo{
username: {
ClientKeyData: privateKey,
ClientCertificateData: clientCert,
},
},
CurrentContext: currentContext,
}
return clientcmd.Write(config)
}
func createCSR(ctx context.Context, clusterClient client.Client, username string) (string, error) {
x509csr, x509key, err := pkiutil.NewCSRAndKey(&certutil.Config{
CommonName: username,
Organization: nil,
AltNames: certutil.AltNames{},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
})
if err != nil {
klog.Errorf("Failed to create CSR and key for user %s: %v", username, err)
return "", err
}
var csrBuffer, keyBuffer bytes.Buffer
if err = pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
klog.Errorf("Failed to encode private key for user %s: %v", username, err)
return "", err
}
var csrBytes []byte
if csrBytes, err = x509.CreateCertificateRequest(rand.Reader, x509csr, x509key); err != nil {
klog.Errorf("Failed to create CSR for user %s: %v", username, err)
return "", err
}
if err = pem.Encode(&csrBuffer, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}); err != nil {
klog.Errorf("Failed to encode CSR for user %s: %v", username, err)
return "", err
}
csr := csrBuffer.Bytes()
key := keyBuffer.Bytes()
csrName := fmt.Sprintf("%s-csr-%d", username, time.Now().Unix())
k8sCSR := &certificatesv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: csrName,
Annotations: map[string]string{kubeconfig.PrivateKeyAnnotation: string(key)},
},
Spec: certificatesv1.CertificateSigningRequestSpec{
Request: csr,
SignerName: certificatesv1.KubeAPIServerClientSignerName,
Usages: []certificatesv1.KeyUsage{certificatesv1.UsageKeyEncipherment, certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature},
Username: username,
Groups: []string{user.AllAuthenticated},
},
}
if err = clusterClient.Create(ctx, k8sCSR); err != nil {
klog.Errorf("Failed to create CSR for user %s: %v", username, err)
return "", err
}
return approveCSR(ctx, clusterClient, k8sCSR)
}
func approveCSR(ctx context.Context, clusterClient client.Client, csr *certificatesv1.CertificateSigningRequest) (string, error) {
csr.Status = certificatesv1.CertificateSigningRequestStatus{
Conditions: []certificatesv1.CertificateSigningRequestCondition{{
Status: corev1.ConditionTrue,
Type: certificatesv1.CertificateApproved,
Reason: "KubeSphereApprove",
Message: "This CSR was approved by KubeSphere",
LastUpdateTime: metav1.Time{
Time: time.Now(),
},
}},
}
if err := clusterClient.SubResource("approval").Update(ctx, csr, &client.SubResourceUpdateOptions{SubResourceBody: csr}); err != nil {
return "", err
}
return csr.Name, nil
}

View File

@@ -0,0 +1,46 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package predicate
import (
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"kubesphere.io/kubesphere/pkg/controller/cluster/utils"
)
type ClusterStatusChangedPredicate struct {
predicate.Funcs
}
func (ClusterStatusChangedPredicate) Update(e event.UpdateEvent) bool {
oldCluster, ok := e.ObjectOld.(*clusterv1alpha1.Cluster)
if !ok {
return false
}
newCluster, ok := e.ObjectNew.(*clusterv1alpha1.Cluster)
if !ok {
return false
}
// fire when the cluster transitions to ready
if !utils.IsClusterReady(oldCluster) && utils.IsClusterReady(newCluster) {
return true
}
if !utils.IsClusterSchedulable(oldCluster) && utils.IsClusterSchedulable(newCluster) {
return true
}
return false
}
func (ClusterStatusChangedPredicate) Create(_ event.CreateEvent) bool {
return false
}
func (ClusterStatusChangedPredicate) Delete(_ event.DeleteEvent) bool {
return false
}
func (ClusterStatusChangedPredicate) Generic(_ event.GenericEvent) bool {
return false
}
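// Illustrative wiring of this predicate (builder import assumed); only
// not-ready -> ready and unschedulable -> schedulable transitions reach the
// reconciler:
//
//	ctrl.NewControllerManagedBy(mgr).
//		For(&clusterv1alpha1.Cluster{},
//			builder.WithPredicates(ClusterStatusChangedPredicate{})).
//		Complete(reconciler)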

View File

@@ -1,313 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
"sigs.k8s.io/kubefed/pkg/kubefedctl/util"
)
// Following code copied from sigs.k8s.io/kubefed to avoid import collision
// UnjoinCluster performs all the necessary steps to remove the
// registration of a cluster from a KubeFed control plane provided the
// required set of parameters are passed in.
func unjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, unjoiningClusterName string, forceDeletion, dryRun bool, skipMemberClusterResources bool) error {
hostClientset, err := util.HostClientset(hostConfig)
if err != nil {
klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
return err
}
var clusterClientset *kubeclient.Clientset
if clusterConfig != nil {
clusterClientset, err = util.ClusterClientset(clusterConfig)
if err != nil {
klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err)
if !forceDeletion {
return err
}
}
}
client, err := genericclient.New(hostConfig)
if err != nil {
klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
return err
}
if clusterClientset != nil && !skipMemberClusterResources {
err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun)
if err != nil {
if !forceDeletion {
return err
}
klog.V(2).Infof("Failed to delete RBAC resources: %v", err)
}
err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun)
if err != nil {
if !forceDeletion {
return err
}
klog.V(2).Infof("Failed to delete kubefed namespace: %v", err)
}
}
// Deletion succeeds only when all operations in deleteRBACResources and deleteFedNSFromUnjoinCluster succeed.
err = deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun)
if err != nil {
return err
}
return nil
}
// deleteFederatedClusterAndSecret deletes the KubeFedCluster resource and the
// secret that associates the cluster with the control plane.
func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client,
kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
if dryRun {
return nil
}
klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q",
kubefedNamespace, unjoiningClusterName)
fedCluster := &fedv1b1.KubeFedCluster{}
err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName)
}
err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(context.Background(), fedCluster.Spec.SecretRef.Name,
metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q",
kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
}
err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name)
if apierrors.IsNotFound(err) {
klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
}
return nil
}
// deleteRBACResources deletes the cluster role, cluster rolebindings and service account
// from the unjoining cluster.
func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface,
namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error {
saName := ClusterServiceAccountName(unjoiningClusterName, hostClusterName)
err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun)
if err != nil {
return err
}
err = deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun)
if err != nil {
return err
}
return nil
}
// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from
// the unjoining cluster so long as the unjoining cluster is not the
// host cluster.
func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface,
kubefedNamespace, unjoiningClusterName string, dryRun bool) error {
if dryRun {
return nil
}
hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace)
}
unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
}
if IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) {
klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace)
return nil
}
klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
err = unjoiningClusterClientset.CoreV1().Namespaces().Delete(context.Background(), kubefedNamespace, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
return nil
} else if err != nil {
return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
} else {
klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
}
return nil
}
// deleteServiceAccount deletes a service account in the cluster associated
// with clusterClientset with credentials that are used by the host cluster
// to access its API server.
func deleteServiceAccount(clusterClientset kubeclient.Interface, saName,
namespace, unjoiningClusterName string, dryRun bool) error {
if dryRun {
return nil
}
klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
// Delete a service account.
err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(context.Background(), saName,
metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName)
} else if err != nil {
return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName)
} else {
klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
}
return nil
}
// deleteClusterRoleAndBinding deletes an RBAC cluster role and binding that
// allows the service account identified by saName to access all resources in
// all namespaces in the cluster associated with clusterClientset.
func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface,
saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
if dryRun {
return nil
}
roleName := util.RoleName(saName)
healthCheckRoleName := util.HealthCheckRoleName(saName, namespace)
// Attempt to delete all role and role bindings created by join
for _, name := range []string{roleName, healthCheckRoleName} {
klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.",
name, saName, unjoiningClusterName)
err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), name, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.",
name, saName, unjoiningClusterName)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q",
name, saName, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.",
name, saName, unjoiningClusterName)
}
klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.",
name, saName, unjoiningClusterName)
err = clusterClientset.RbacV1().ClusterRoles().Delete(context.Background(), name, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.",
name, saName, unjoiningClusterName)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q",
name, saName, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleted cluster role %q for service account %q in unjoining cluster %q.",
name, saName, unjoiningClusterName)
}
}
klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(context.Background(), roleName, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q",
namespace, roleName, saName, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
}
klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
err = clusterClientset.RbacV1().Roles(namespace).Delete(context.Background(), roleName, metav1.DeleteOptions{})
if apierrors.IsNotFound(err) {
klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
} else if err != nil {
wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q",
namespace, roleName, saName, unjoiningClusterName)
if !forceDeletion {
return wrappedErr
}
klog.V(2).Infof("%v", wrappedErr)
} else {
klog.V(2).Infof("Deleting Role \"%s/%s\" for service account %q in unjoining cluster %q.",
namespace, roleName, saName, unjoiningClusterName)
}
return nil
}

View File

@@ -1,65 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// HostClientset provides a kubernetes API compliant clientset to
// communicate with the host cluster's kubernetes API server.
func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) {
return kubeclient.NewForConfig(config)
}
// ClusterClientset provides a kubernetes API compliant clientset to
// communicate with the joining cluster's kubernetes API server.
func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) {
return kubeclient.NewForConfig(config)
}
// ClusterServiceAccountName returns the name of a service account whose
// credentials are used by the host cluster to access the client cluster.
func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string {
return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName)
}
// IsPrimaryCluster checks if the caller is working with objects for the
// primary cluster by checking if the UIDs match for both ObjectMetas passed
// in.
// TODO (font): Need to revisit this when cluster ID is available.
func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
meta := MetaAccessor(obj)
clusterMeta := MetaAccessor(clusterObj)
return meta.GetUID() == clusterMeta.GetUID()
}
func MetaAccessor(obj pkgruntime.Object) metav1.Object {
accessor, err := meta.Accessor(obj)
if err != nil {
// This should always succeed if obj is not nil. Also,
// adapters are slated for replacement by unstructured.
return nil
}
return accessor
}

View File

@@ -1,23 +1,17 @@
/*
Copyright 2022 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package utils
import (
"os"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
)
@@ -30,3 +24,70 @@ func IsClusterReady(cluster *clusterv1alpha1.Cluster) bool {
}
return false
}
func IsClusterSchedulable(cluster *clusterv1alpha1.Cluster) bool {
if !cluster.DeletionTimestamp.IsZero() {
return false
}
if !IsClusterReady(cluster) {
return false
}
for _, condition := range cluster.Status.Conditions {
if condition.Type == clusterv1alpha1.ClusterSchedulable && condition.Status == corev1.ConditionFalse {
return false
}
}
return true
}
func IsHostCluster(cluster *clusterv1alpha1.Cluster) bool {
if _, ok := cluster.Labels[clusterv1alpha1.HostCluster]; ok {
return true
}
return false
}
func BuildKubeconfigFromRestConfig(config *rest.Config) ([]byte, error) {
apiConfig := api.NewConfig()
apiCluster := &api.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
}
// The generated kubeconfig will be consumed by cluster federation; kubefed
// does not accept CAFile, so read the CA data from the file instead.
if len(apiCluster.CertificateAuthorityData) == 0 && len(config.CAFile) != 0 {
caData, err := os.ReadFile(config.CAFile)
if err != nil {
return nil, err
}
apiCluster.CertificateAuthorityData = caData
}
apiConfig.Clusters["kubernetes"] = apiCluster
apiConfig.AuthInfos["kubernetes-admin"] = &api.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
Token: config.BearerToken,
}
if config.BearerTokenFile != "" {
newToken, _ := os.ReadFile(config.BearerTokenFile)
if len(newToken) > 0 {
apiConfig.AuthInfos["kubernetes-admin"].Token = string(newToken)
}
}
apiConfig.Contexts["kubernetes-admin@kubernetes"] = &api.Context{
Cluster: "kubernetes",
AuthInfo: "kubernetes-admin",
}
apiConfig.CurrentContext = "kubernetes-admin@kubernetes"
return clientcmd.Write(*apiConfig)
}
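// Usage sketch (assumed caller): serialize the controller's own rest.Config
// into a portable kubeconfig that kubefed can consume.
//
//	restConfig, err := rest.InClusterConfig()
//	if err != nil {
//		return err
//	}
//	kubeconfigBytes, err := BuildKubeconfigFromRestConfig(restConfig)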

View File

@@ -0,0 +1,127 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package clusterlabel
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
// Reconciler is a reconciler for the Label object.
type Reconciler struct {
client.Client
}
func (r *Reconciler) Name() string {
return "clusterlabel"
}
func (r *Reconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
// Reconcile reconciles the Label object, sync label to the individual Cluster CRs.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
label := &clusterv1alpha1.Label{}
if err := r.Get(ctx, req.NamespacedName, label); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if label.DeletionTimestamp != nil {
return ctrl.Result{}, r.deleteLabel(ctx, label)
}
if len(label.Finalizers) == 0 {
label.Finalizers = []string{clusterv1alpha1.LabelFinalizer}
if err := r.Update(ctx, label); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, nil
}
return ctrl.Result{}, r.syncLabelToClusters(ctx, label)
}
func (r *Reconciler) syncLabelToClusters(ctx context.Context, label *clusterv1alpha1.Label) error {
klog.V(4).Infof("sync label %s[%s/%v] to clusters: %v", label.Name, label.Spec.Key, label.Spec.Value, label.Spec.Clusters)
clusterSets := sets.NewString(label.Spec.Clusters...)
for name := range clusterSets {
cluster := &clusterv1alpha1.Cluster{}
if err := r.Get(ctx, client.ObjectKey{Name: name}, cluster); err != nil {
if errors.IsNotFound(err) {
clusterSets.Delete(name)
continue
}
return err
}
if cluster.Labels == nil {
cluster.Labels = make(map[string]string)
}
if _, ok := cluster.Labels[fmt.Sprintf(clusterv1alpha1.ClusterLabelFormat, label.Name)]; ok {
continue
}
cluster.Labels[fmt.Sprintf(clusterv1alpha1.ClusterLabelFormat, label.Name)] = ""
if err := r.Update(ctx, cluster); err != nil {
return err
}
}
clusters := clusterSets.List()
// Some clusters no longer exist; persist the pruned list.
if len(clusters) != len(label.Spec.Clusters) {
label.Spec.Clusters = clusters
return r.Update(ctx, label)
}
return nil
}
func (r *Reconciler) deleteLabel(ctx context.Context, label *clusterv1alpha1.Label) error {
klog.V(4).Infof("deleting label %s, removing cluster %v related label", label.Name, label.Spec.Clusters)
for _, name := range label.Spec.Clusters {
cluster := &clusterv1alpha1.Cluster{}
if err := r.Get(ctx, client.ObjectKey{Name: name}, cluster); err != nil {
if errors.IsNotFound(err) {
continue
}
return err
}
delete(cluster.Labels, fmt.Sprintf(clusterv1alpha1.ClusterLabelFormat, label.Name))
if err := r.Update(ctx, cluster); err != nil {
return err
}
}
label.Finalizers = nil
return r.Update(ctx, label)
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(
&clusterv1alpha1.Label{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
Complete(r)
}
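// Hypothetical Label object (field names follow the code above; the spec type
// name and literal values are assumptions) showing what the reconciler syncs:
// each listed Cluster gains a label keyed by
// fmt.Sprintf(clusterv1alpha1.ClusterLabelFormat, label.Name).
//
//	label := &clusterv1alpha1.Label{
//		ObjectMeta: metav1.ObjectMeta{Name: "region-abc123"},
//		Spec: clusterv1alpha1.LabelSpec{
//			Key:      "region",
//			Value:    "us-west",
//			Clusters: []string{"member-a", "member-b"},
//		},
//	}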

View File

@@ -0,0 +1,104 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package clusterrole
import (
"context"
"fmt"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/go-logr/logr"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
rbachelper "kubesphere.io/kubesphere/pkg/componenthelper/auth/rbac"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
rbacutils "kubesphere.io/kubesphere/pkg/utils/rbac"
)
const (
controllerName = "clusterrole"
roleRef = "iam.kubesphere.io/clusterrole-ref"
)
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
type Reconciler struct {
client.Client
logger logr.Logger
recorder record.EventRecorder
helper *rbachelper.Helper
}
func (r *Reconciler) Name() string {
return controllerName
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.logger = ctrl.Log.WithName("controllers").WithName(controllerName)
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
r.helper = rbachelper.NewHelper(mgr.GetClient())
return ctrl.NewControllerManagedBy(mgr).
Named(controllerName).
For(&iamv1beta1.ClusterRole{}).
Complete(r)
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.logger.WithValues("ClusterRole", req.String())
clusterRole := &iamv1beta1.ClusterRole{}
if err := r.Get(ctx, req.NamespacedName, clusterRole); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if clusterRole.AggregationRoleTemplates != nil {
if err := r.helper.AggregationRole(ctx, rbachelper.ClusterRoleRuleOwner{ClusterRole: clusterRole}, r.recorder); err != nil {
return ctrl.Result{}, err
}
}
if err := r.syncToKubernetes(ctx, clusterRole); err != nil {
log.Error(err, "sync cluster role failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *Reconciler) syncToKubernetes(ctx context.Context, clusterRole *iamv1beta1.ClusterRole) error {
k8sClusterRole := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: rbacutils.RelatedK8sResourceName(clusterRole.Name)},
}
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, k8sClusterRole, func() error {
if k8sClusterRole.Labels == nil {
k8sClusterRole.Labels = make(map[string]string)
}
k8sClusterRole.Labels[roleRef] = clusterRole.Name
k8sClusterRole.Rules = clusterRole.Rules
if err := controllerutil.SetOwnerReference(clusterRole, k8sClusterRole, r.Scheme()); err != nil {
return fmt.Errorf("failed to set owner reference: %s", err)
}
return nil
})
if err != nil {
r.logger.Error(err, "sync cluster role failed", "cluster role", clusterRole.Name)
return err
}
r.logger.V(4).Info("sync cluster role to K8s", "cluster role", clusterRole.Name, "op", op)
return nil
}

View File

@@ -1,238 +1,113 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package clusterrolebinding
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsv1informers "k8s.io/client-go/informers/apps/v1"
coreinfomers "k8s.io/client-go/informers/core/v1"
rbacv1informers "k8s.io/client-go/informers/rbac/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
rbacv1listers "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
"github.com/go-logr/logr"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/models/kubectl"
rbachelper "kubesphere.io/kubesphere/pkg/componenthelper/auth/rbac"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
rbacutils "kubesphere.io/kubesphere/pkg/utils/rbac"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
successSynced = "Synced"
// is synced successfully
messageResourceSynced = "ClusterRoleBinding synced successfully"
controllerName = "clusterrolebinding-controller"
controllerName = "clusterrolebinding"
roleBindingRef = "iam.kubesphere.io/clusterrolebinding-ref"
)
type Controller struct {
k8sClient kubernetes.Interface
clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer
clusterRoleBindingLister rbacv1listers.ClusterRoleBindingLister
clusterRoleBindingSynced cache.InformerSynced
userSynced cache.InformerSynced
deploymentSynced cache.InformerSynced
podSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
kubectlOperator kubectl.Interface
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
type Reconciler struct {
client.Client
logger logr.Logger
recorder record.EventRecorder
helper *rbachelper.Helper
}
func NewController(k8sClient kubernetes.Interface, clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer,
deploymentInformer appsv1informers.DeploymentInformer, podInformer coreinfomers.PodInformer,
userInformer iamv1alpha2informers.UserInformer, kubectlImage string) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
k8sClient: k8sClient,
clusterRoleBindingInformer: clusterRoleBindingInformer,
clusterRoleBindingLister: clusterRoleBindingInformer.Lister(),
clusterRoleBindingSynced: clusterRoleBindingInformer.Informer().HasSynced,
userSynced: userInformer.Informer().HasSynced,
deploymentSynced: deploymentInformer.Informer().HasSynced,
podSynced: podInformer.Informer().HasSynced,
kubectlOperator: kubectl.NewOperator(k8sClient, deploymentInformer, podInformer, userInformer, kubectlImage),
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleBinding"),
recorder: recorder,
}
klog.Info("Setting up event handlers")
clusterRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueClusterRoleBinding,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueClusterRoleBinding(new)
},
DeleteFunc: ctl.enqueueClusterRoleBinding,
})
return ctl
func (r *Reconciler) Name() string {
return controllerName
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.logger = ctrl.Log.WithName("controllers").WithName(controllerName)
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
r.helper = rbachelper.NewHelper(mgr.GetClient())
// Start the informer factories to begin populating the informer caches
klog.Info("Starting ClusterRoleBinding controller")
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.clusterRoleBindingSynced, c.userSynced, c.deploymentSynced, c.podSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
return nil
return ctrl.NewControllerManagedBy(mgr).
Named(controllerName).
For(&iamv1beta1.ClusterRoleBinding{}).
Complete(r)
}
func (c *Controller) enqueueClusterRoleBinding(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.logger.WithValues("ClusterRoleBinding", req.String())
ctx = logr.NewContext(ctx, log)
clusterRole := &iamv1beta1.ClusterRoleBinding{}
if err := r.Get(ctx, req.NamespacedName, clusterRole); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
c.workqueue.Add(key)
if err := r.syncToKubernetes(ctx, clusterRole); err != nil {
log.Error(err, "sync cluster role binding failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
func (r *Reconciler) syncToKubernetes(ctx context.Context, clusterRoleBinding *iamv1beta1.ClusterRoleBinding) error {
k8sClusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: rbacutils.RelatedK8sResourceName(clusterRoleBinding.Name)},
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, k8sClusterRoleBinding, func() error {
if k8sClusterRoleBinding.Labels == nil {
k8sClusterRoleBinding.Labels = make(map[string]string)
}
// Run the reconcile, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.reconcile(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
k8sClusterRolBinding.Labels[roleBindingRef] = clusterRoleBinding.Name
k8sClusterRolBinding.RoleRef = rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: clusterRoleBinding.RoleRef.Kind,
Name: rbacutils.RelatedK8sResourceName(clusterRoleBinding.RoleRef.Name),
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced %s:%s", "key", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
func (c *Controller) reconcile(key string) error {
// Get the clusterRoleBinding with this name
clusterRoleBinding, err := c.clusterRoleBindingLister.Get(key)
if err != nil {
// The resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("clusterrolebinding '%s' in work queue no longer exists", key))
return nil
}
klog.Error(err)
return err
}
if clusterRoleBinding.RoleRef.Name == iamv1alpha2.ClusterAdmin {
var subjects []rbacv1.Subject
for _, subject := range clusterRoleBinding.Subjects {
if subject.Kind == iamv1alpha2.ResourceKindUser {
err = c.kubectlOperator.CreateKubectlDeploy(subject.Name, clusterRoleBinding)
if err != nil {
klog.Error(err)
return err
}
newSubject := rbacv1.Subject{
Kind: subject.Kind,
Name: subject.Name,
Namespace: subject.Namespace,
}
if subject.Kind != rbacv1.ServiceAccountKind {
newSubject.APIGroup = rbacv1.GroupName
}
subjects = append(subjects, newSubject)
}
k8sClusterRolBinding.Subjects = subjects
if err := controllerutil.SetOwnerReference(clusterRoleBinding, k8sClusterRolBinding, r.Scheme()); err != nil {
return fmt.Errorf("failed to set owner reference: %s", err)
}
return nil
})
if err != nil {
r.logger.Error(err, "sync cluster role binding failed", "cluster role binding", clusterRoleBinding.Name)
}
c.recorder.Event(clusterRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
r.logger.V(4).Info("sync cluster role binding to K8s", "cluster role binding", clusterRoleBinding.Name, "op", op)
return nil
}
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}

View File

@@ -0,0 +1,97 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package config
import (
"context"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/config/identityprovider"
"kubesphere.io/kubesphere/pkg/controller/config/oauthclient"
)
func (w *Webhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
secret := obj.(*v1.Secret)
validator := w.factory.GetValidator(secret.Type)
if validator != nil {
return validator.ValidateCreate(ctx, secret)
}
return nil, nil
}
func (w *Webhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) {
newSecret := newObj.(*v1.Secret)
oldSecret := oldObj.(*v1.Secret)
if validator := w.factory.GetValidator(newSecret.Type); validator != nil {
return validator.ValidateUpdate(ctx, oldSecret, newSecret)
}
return nil, nil
}
func (w *Webhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
secret := obj.(*v1.Secret)
validator := w.factory.GetValidator(secret.Type)
if validator != nil {
return validator.ValidateDelete(ctx, secret)
}
return nil, nil
}
func (w *Webhook) Default(ctx context.Context, obj runtime.Object) error {
secret := obj.(*v1.Secret)
if secret.Namespace != constants.KubeSphereNamespace {
return nil
}
defaulter := w.factory.GetDefaulter(secret.Type)
if defaulter != nil {
return defaulter.Default(ctx, secret)
}
return nil
}
var _ admission.CustomDefaulter = &Webhook{}
var _ admission.CustomValidator = &Webhook{}
var _ kscontroller.Controller = &Webhook{}
const webhookName = "kubesphere-config-webhook"
func (w *Webhook) Name() string {
return webhookName
}
type Webhook struct {
client.Client
factory *WebhookFactory
}
func (w *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
factory := NewWebhookFactory()
oauthWebhookHandler := &oauthclient.WebhookHandler{Client: mgr.GetClient()}
factory.RegisterValidator(oauthWebhookHandler)
factory.RegisterDefaulter(oauthWebhookHandler)
identityProviderWebhookHandler := &identityprovider.WebhookHandler{Client: mgr.GetClient()}
factory.RegisterValidator(identityProviderWebhookHandler)
factory.RegisterDefaulter(identityProviderWebhookHandler)
w.Client = mgr.GetClient()
w.factory = factory
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(w).
WithDefaulter(w).
For(&v1.Secret{}).
Complete()
}

View File

@@ -0,0 +1,102 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package identityprovider
import (
"context"
"errors"
"fmt"
"sync"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
)
var once sync.Once
type WebhookHandler struct {
client.Client
getter identityprovider.ConfigurationGetter
}
func (w *WebhookHandler) Default(_ context.Context, secret *corev1.Secret) error {
configuration, err := identityprovider.UnmarshalFrom(secret)
if err != nil {
return err
}
if configuration.Name != "" {
if secret.Labels == nil {
secret.Labels = make(map[string]string)
}
secret.Labels[identityprovider.SecretTypeIdentityProvider] = configuration.Name
}
return nil
}
func (w *WebhookHandler) ValidateCreate(ctx context.Context, secret *corev1.Secret) (admission.Warnings, error) {
idp, err := identityprovider.UnmarshalFrom(secret)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal identity provider: %v", err)
}
if idp.Name == "" {
return nil, errors.New("invalid Identity Provider, please ensure that the provider name is not empty")
}
exists, err := w.isClientExist(ctx, idp.Name)
if err != nil {
return nil, fmt.Errorf("failed to check identity provider: %v", err)
}
if exists {
return nil, fmt.Errorf("invalid provider, provider name '%s' already exists", idp.Name)
}
return nil, nil
}
func (w *WebhookHandler) ValidateUpdate(_ context.Context, old, new *corev1.Secret) (admission.Warnings, error) {
oldIdp, err := identityprovider.UnmarshalFrom(old)
if err != nil {
return nil, err
}
newIdp, err := identityprovider.UnmarshalFrom(new)
if err != nil {
return nil, err
}
if newIdp.Name != oldIdp.Name {
return nil, fmt.Errorf("the provider name is immutable, old: %s, new: %s", oldIdp.Name, newIdp.Name)
}
return nil, nil
}
func (w *WebhookHandler) ValidateDelete(_ context.Context, _ *corev1.Secret) (admission.Warnings, error) {
return nil, nil
}
func (w *WebhookHandler) ConfigType() corev1.SecretType {
return identityprovider.SecretTypeIdentityProvider
}
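// isClientExist reports whether an identity provider configuration with the
// given name is already registered. The configuration getter is initialized
// lazily; the package-level once assumes a single WebhookHandler per process.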
func (w *WebhookHandler) isClientExist(ctx context.Context, clientName string) (bool, error) {
once.Do(func() {
w.getter = identityprovider.NewConfigurationGetter(w.Client)
})
_, err := w.getter.GetConfiguration(ctx, clientName)
if err != nil {
		if errors.Is(err, identityprovider.ErrorIdentityProviderNotFound) {
return false, nil
}
return false, fmt.Errorf("failed to get identity provider: %v", err)
}
return true, nil
}

View File

@@ -0,0 +1,131 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package oauthclient
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
)
var once sync.Once
type WebhookHandler struct {
client.Client
getter oauth.ClientGetter
}
func (v *WebhookHandler) Default(_ context.Context, secret *v1.Secret) error {
oc, err := oauth.UnmarshalFrom(secret)
if err != nil {
return err
}
if oc.GrantMethod == "" {
oc.GrantMethod = oauth.GrantMethodAuto
}
if oc.Secret == "" {
oc.Secret = generatePassword(32)
}
if secret.Labels == nil {
secret.Labels = make(map[string]string)
}
secret.Labels[oauth.SecretTypeOAuthClient] = oc.Name
return oauth.MarshalInto(oc, secret)
}
func (v *WebhookHandler) ValidateCreate(ctx context.Context, secret *corev1.Secret) (admission.Warnings, error) {
oc, err := oauth.UnmarshalFrom(secret)
if err != nil {
return nil, err
}
if oc.Name != "" {
exist, err := v.clientExist(ctx, oc.Name)
if err != nil {
return nil, err
}
if exist {
return nil, fmt.Errorf("invalid OAuth client, client name '%s' already exists", oc.Name)
}
}
return validate(oc)
}
func (v *WebhookHandler) ValidateUpdate(_ context.Context, old, new *corev1.Secret) (admission.Warnings, error) {
newOc, err := oauth.UnmarshalFrom(new)
if err != nil {
return nil, err
}
oldOc, err := oauth.UnmarshalFrom(old)
if err != nil {
return nil, err
}
if newOc.Name != oldOc.Name {
return nil, fmt.Errorf("cannot change client name")
}
return validate(newOc)
}
func (v *WebhookHandler) ValidateDelete(_ context.Context, _ *corev1.Secret) (admission.Warnings, error) {
return nil, nil
}
func (v *WebhookHandler) ConfigType() corev1.SecretType {
return oauth.SecretTypeOAuthClient
}
// validate performs general validation for the OAuth client.
func validate(oc *oauth.Client) (admission.Warnings, error) {
if oc.Name == "" {
return nil, fmt.Errorf("invalid OAuth client, please ensure that the client name is not empty")
}
if err := oauth.ValidateClient(*oc); err != nil {
return nil, err
}
// Other scope values MAY be present.
// Scope values used that are not understood by an implementation SHOULD be ignored.
if !oauth.IsValidScopes(oc.ScopeRestrictions) {
warnings := fmt.Sprintf("some requested scopes were invalid: %v", oc.ScopeRestrictions)
return []string{warnings}, nil
}
return nil, nil
}
func (v *WebhookHandler) clientExist(ctx context.Context, clientName string) (bool, error) {
once.Do(func() {
v.getter = oauth.NewOAuthClientGetter(v.Client)
})
if _, err := v.getter.GetOAuthClient(ctx, clientName); err != nil {
if errors.Is(err, oauth.ErrorClientNotFound) {
return false, nil
}
return false, err
}
return true, nil
}
func generatePassword(length int) string {
characters := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
password := make([]byte, length)
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for i := range password {
password[i] = characters[r.Intn(len(characters))]
}
return string(password)
}
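Note: generatePassword draws from math/rand, which is deterministic and unsuitable for high-entropy secrets. A crypto/rand-backed variant is sketched below; it is not part of this commit and assumes additional "crypto/rand" (imported as cryptorand) and "math/big" imports:
func generateSecurePassword(length int) (string, error) {
	const characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
	password := make([]byte, length)
	for i := range password {
		// cryptorand.Int returns a uniform random value in [0, len(characters)).
		n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(int64(len(characters))))
		if err != nil {
			return "", err
		}
		password[i] = characters[n.Int64()]
	}
	return string(password), nil
}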

View File

@@ -0,0 +1,54 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package config
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
v1 "k8s.io/api/core/v1"
)
type ValidatorInterface interface {
ValidateCreate(ctx context.Context, secret *v1.Secret) (admission.Warnings, error)
ValidateUpdate(ctx context.Context, old, new *v1.Secret) (admission.Warnings, error)
ValidateDelete(ctx context.Context, secret *v1.Secret) (admission.Warnings, error)
ConfigType() v1.SecretType
}
type DefaulterInterface interface {
Default(ctx context.Context, secret *v1.Secret) error
ConfigType() v1.SecretType
}
func (w *WebhookFactory) RegisterValidator(validator ValidatorInterface) {
w.validators[validator.ConfigType()] = validator
}
func (w *WebhookFactory) RegisterDefaulter(defaulter DefaulterInterface) {
w.defaulters[defaulter.ConfigType()] = defaulter
}
func (w *WebhookFactory) GetValidator(secretType v1.SecretType) ValidatorInterface {
return w.validators[secretType]
}
func (w *WebhookFactory) GetDefaulter(secretType v1.SecretType) DefaulterInterface {
return w.defaulters[secretType]
}
type WebhookFactory struct {
validators map[v1.SecretType]ValidatorInterface
defaulters map[v1.SecretType]DefaulterInterface
}
func NewWebhookFactory() *WebhookFactory {
return &WebhookFactory{
validators: make(map[v1.SecretType]ValidatorInterface),
defaulters: make(map[v1.SecretType]DefaulterInterface),
}
}
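A new configuration type plugs in by implementing ValidatorInterface (and optionally DefaulterInterface) and registering the handler in the config webhook's SetupWithManager. The handler below is a hypothetical illustration, not part of this commit; the secret type and the fmt import are assumptions:
type exampleHandler struct{}
// ConfigType binds this handler to a made-up secret type.
func (h *exampleHandler) ConfigType() v1.SecretType {
	return v1.SecretType("config.kubesphere.io/example")
}
func (h *exampleHandler) ValidateCreate(_ context.Context, secret *v1.Secret) (admission.Warnings, error) {
	if len(secret.Data["endpoint"]) == 0 {
		return nil, fmt.Errorf("example config must declare an endpoint")
	}
	return nil, nil
}
func (h *exampleHandler) ValidateUpdate(ctx context.Context, _, new *v1.Secret) (admission.Warnings, error) {
	return h.ValidateCreate(ctx, new)
}
func (h *exampleHandler) ValidateDelete(_ context.Context, _ *v1.Secret) (admission.Warnings, error) {
	return nil, nil
}
// registration alongside the built-in handlers:
//	factory.RegisterValidator(&exampleHandler{})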

View File

@@ -0,0 +1,36 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package controllertest
import (
"fmt"
"io/fs"
"path/filepath"
"runtime"
)
func LoadCrdPath() ([]string, error) {
_, filename, _, ok := runtime.Caller(0)
if !ok {
return nil, fmt.Errorf("could not determine path")
}
curDir, _ := filepath.Split(filename)
crdDirPaths := make([]string, 0, 1)
projectRoot := filepath.Join(curDir, "..", "..", "..")
configRoot := filepath.Join(projectRoot, "config")
if err := filepath.WalkDir(configRoot, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
_, file := filepath.Split(path)
if file == "crds" {
crdDirPaths = append(crdDirPaths, path)
}
}
return nil
}); err != nil {
return nil, err
}
return crdDirPaths, nil
}
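LoadCrdPath is typically consumed when wiring up an envtest environment. A minimal sketch, assuming the standard controller-runtime envtest API ("sigs.k8s.io/controller-runtime/pkg/envtest"):
func startTestEnv() (*envtest.Environment, error) {
	crdPaths, err := LoadCrdPath()
	if err != nil {
		return nil, err
	}
	testEnv := &envtest.Environment{
		CRDDirectoryPaths:     crdPaths, // every "crds" directory found under config/
		ErrorIfCRDPathMissing: true,
	}
	// Start boots a local control plane and installs the discovered CRDs.
	if _, err := testEnv.Start(); err != nil {
		return nil, err
	}
	return testEnv, nil
}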

View File

@@ -0,0 +1,28 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package conversion
import (
"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const webhookName = "conversion-webhook"
func (w *Webhook) Name() string {
return webhookName
}
var _ kscontroller.Controller = &Webhook{}
type Webhook struct {
}
func (w *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(mgr.GetScheme()))
return nil
}

View File

@@ -0,0 +1,104 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"context"
"strconv"
"strings"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
categoryController = "extension-category"
countOfRelatedExtensions = "kubesphere.io/count"
)
var _ kscontroller.Controller = &CategoryReconciler{}
var _ reconcile.Reconciler = &CategoryReconciler{}
func (r *CategoryReconciler) Name() string {
return categoryController
}
func (r *CategoryReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
type CategoryReconciler struct {
client.Client
recorder record.EventRecorder
logger logr.Logger
}
func (r *CategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(categoryController)
r.recorder = mgr.GetEventRecorderFor(categoryController)
return ctrl.NewControllerManagedBy(mgr).
Named(categoryController).
For(&corev1alpha1.Category{}).
Watches(
&corev1alpha1.Extension{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request {
var requests []reconcile.Request
extension := object.(*corev1alpha1.Extension)
if category := extension.Labels[corev1alpha1.CategoryLabel]; category != "" {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: category,
},
})
}
return requests
}),
builder.WithPredicates(predicate.LabelChangedPredicate{}),
).
Complete(r)
}
func (r *CategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.logger.WithValues("category", req.String())
logger.V(4).Info("sync category")
ctx = klog.NewContext(ctx, logger)
category := &corev1alpha1.Category{}
if err := r.Client.Get(ctx, req.NamespacedName, category); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
extensions := &corev1alpha1.ExtensionList{}
if err := r.List(ctx, extensions, client.MatchingLabels{corev1alpha1.CategoryLabel: category.Name}); err != nil {
return ctrl.Result{}, err
}
total := strconv.Itoa(len(extensions.Items))
if category.Annotations[countOfRelatedExtensions] != total {
if category.Annotations == nil {
category.Annotations = make(map[string]string)
}
category.Annotations[countOfRelatedExtensions] = total
if err := r.Update(ctx, category); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}

View File

@@ -0,0 +1,182 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"github.com/Masterminds/semver/v3"
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const (
extensionProtection = "kubesphere.io/extension-protection"
extensionController = "extension"
)
var _ kscontroller.Controller = &ExtensionReconciler{}
var _ reconcile.Reconciler = &ExtensionReconciler{}
func (r *ExtensionReconciler) Name() string {
return extensionController
}
func (r *ExtensionReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
type ExtensionReconciler struct {
client.Client
k8sVersion *semver.Version
logger logr.Logger
}
func (r *ExtensionReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.k8sVersion = mgr.K8sVersion
r.logger = ctrl.Log.WithName("controllers").WithName(extensionController)
return ctrl.NewControllerManagedBy(mgr).
Named(extensionController).
For(&corev1alpha1.Extension{}).
Watches(
&corev1alpha1.ExtensionVersion{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request {
var requests []reconcile.Request
extensionVersion := object.(*corev1alpha1.ExtensionVersion)
extensionName := extensionVersion.Labels[corev1alpha1.ExtensionReferenceLabel]
if extensionName != "" {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: extensionName,
},
})
}
return requests
}),
builder.WithPredicates(predicate.Funcs{
GenericFunc: func(event event.GenericEvent) bool {
return false
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return false
},
}),
).
Complete(r)
}
func (r *ExtensionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
extension := &corev1alpha1.Extension{}
if err := r.Client.Get(ctx, req.NamespacedName, extension); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
r.logger.V(4).Info("reconcile", "extension", extension.Name)
if extension.ObjectMeta.DeletionTimestamp != nil {
return r.reconcileDelete(ctx, extension)
}
if !controllerutil.ContainsFinalizer(extension, extensionProtection) {
expected := extension.DeepCopy()
controllerutil.AddFinalizer(expected, extensionProtection)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(extension))
}
if err := r.syncExtensionStatus(ctx, extension); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to sync extension status: %s", err)
}
r.logger.V(4).Info("synced", "extension", extension.Name)
return ctrl.Result{}, nil
}
// reconcileDelete delete the extension.
func (r *ExtensionReconciler) reconcileDelete(ctx context.Context, extension *corev1alpha1.Extension) (ctrl.Result, error) {
deletePolicy := metav1.DeletePropagationBackground
if err := r.DeleteAllOf(ctx, &corev1alpha1.ExtensionVersion{}, &client.DeleteAllOfOptions{
ListOptions: client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{corev1alpha1.ExtensionReferenceLabel: extension.Name}),
},
DeleteOptions: client.DeleteOptions{PropagationPolicy: &deletePolicy},
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete related ExtensionVersion: %s", err)
}
// Remove the finalizer from the extension
controllerutil.RemoveFinalizer(extension, extensionProtection)
if err := r.Update(ctx, extension); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *ExtensionReconciler) syncExtensionStatus(ctx context.Context, extension *corev1alpha1.Extension) error {
versionList := corev1alpha1.ExtensionVersionList{}
if err := r.List(ctx, &versionList, client.MatchingLabels{
corev1alpha1.ExtensionReferenceLabel: extension.Name,
}); err != nil {
return err
}
versions := make([]corev1alpha1.ExtensionVersionInfo, 0, len(versionList.Items))
for i := range versionList.Items {
if versionList.Items[i].DeletionTimestamp.IsZero() {
versions = append(versions, corev1alpha1.ExtensionVersionInfo{
Version: versionList.Items[i].Spec.Version,
CreationTimestamp: versionList.Items[i].CreationTimestamp,
})
}
}
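	// NOTE: the status list is ordered by raw version strings; semver-aware
	// selection happens in getRecommendedExtensionVersion.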
sort.Slice(versions, func(i, j int) bool {
return versions[i].Version < versions[j].Version
})
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if err := r.Get(ctx, types.NamespacedName{Name: extension.Name}, extension); err != nil {
return err
}
expected := extension.DeepCopy()
if recommended, err := getRecommendedExtensionVersion(versionList.Items, r.k8sVersion); err == nil {
expected.Status.RecommendedVersion = recommended
} else {
r.logger.Error(err, "failed to get recommended extension version")
}
expected.Status.Versions = versions
if expected.Status.RecommendedVersion != extension.Status.RecommendedVersion ||
!reflect.DeepEqual(expected.Status.Versions, extension.Status.Versions) {
return r.Update(ctx, expected)
}
return nil
})
if err != nil {
return fmt.Errorf("failed to update extension status: %s", err)
}
return nil
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,98 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"bytes"
"context"
"fmt"
"strings"
"unicode"
"k8s.io/apimachinery/pkg/util/yaml"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
var _ admission.CustomValidator = &InstallPlanWebhook{}
var _ kscontroller.Controller = &InstallPlanWebhook{}
func (r *InstallPlanWebhook) Name() string {
return "installplan-webhook"
}
type InstallPlanWebhook struct {
client.Client
}
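// trimSpace strips trailing whitespace from every line of data while
// preserving line breaks, so configs that differ only in trailing spaces
// normalize to the same string.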
func trimSpace(data string) string {
lines := strings.Split(data, "\n")
var buf bytes.Buffer
max := len(lines)
for i, line := range lines {
buf.Write([]byte(strings.TrimRightFunc(line, unicode.IsSpace)))
if i < max-1 {
buf.Write([]byte("\n"))
}
}
return buf.String()
}
func (r *InstallPlanWebhook) Default(ctx context.Context, obj runtime.Object) error {
installPlan := obj.(*corev1alpha1.InstallPlan)
installPlan.Spec.Config = trimSpace(installPlan.Spec.Config)
if installPlan.Spec.ClusterScheduling != nil {
for k, v := range installPlan.Spec.ClusterScheduling.Overrides {
installPlan.Spec.ClusterScheduling.Overrides[k] = trimSpace(v)
}
}
return nil
}
func (r *InstallPlanWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return r.validateInstallPlan(ctx, obj.(*corev1alpha1.InstallPlan))
}
func (r *InstallPlanWebhook) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) {
return r.validateInstallPlan(ctx, newObj.(*corev1alpha1.InstallPlan))
}
func (r *InstallPlanWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
func (r *InstallPlanWebhook) validateInstallPlan(_ context.Context, installPlan *corev1alpha1.InstallPlan) (admission.Warnings, error) {
var data interface{}
if err := yaml.Unmarshal([]byte(installPlan.Spec.Config), &data); err != nil {
return nil, fmt.Errorf("failed to unmarshal extension config: %v", err)
}
if installPlan.Spec.ClusterScheduling != nil {
for cluster, config := range installPlan.Spec.ClusterScheduling.Overrides {
if err := yaml.Unmarshal([]byte(config), &data); err != nil {
return nil, fmt.Errorf("failed to unmarshal cluster %s agent config: %v", cluster, err)
}
}
}
return nil, nil
}
func (r *InstallPlanWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(r).
WithDefaulter(r).
For(&corev1alpha1.InstallPlan{}).
Complete()
}

View File

@@ -0,0 +1,553 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"path"
"strings"
"time"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"github.com/go-logr/logr"
"helm.sh/helm/v3/pkg/chart/loader"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
"kubesphere.io/utils/helm"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/constants"
)
const (
repositoryProtection = "kubesphere.io/repository-protection"
repositoryController = "repository"
minimumRegistryPollInterval = 15 * time.Minute
defaultRequeueInterval = 15 * time.Second
generateNameFormat = "repository-%s"
extensionFileName = "extension.yaml"
)
var extensionRepoConflict = fmt.Errorf("extension repo mismatch")
var _ kscontroller.Controller = &RepositoryReconciler{}
var _ reconcile.Reconciler = &RepositoryReconciler{}
func (r *RepositoryReconciler) Name() string {
return repositoryController
}
func (r *RepositoryReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
type RepositoryReconciler struct {
client.Client
recorder record.EventRecorder
logger logr.Logger
}
func (r *RepositoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(repositoryController)
r.recorder = mgr.GetEventRecorderFor(repositoryController)
return ctrl.NewControllerManagedBy(mgr).
Named(repositoryController).
For(&corev1alpha1.Repository{}).
Complete(r)
}
func (r *RepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.logger.WithValues("repository", req.String())
logger.V(4).Info("sync repository")
ctx = klog.NewContext(ctx, logger)
repo := &corev1alpha1.Repository{}
if err := r.Client.Get(ctx, req.NamespacedName, repo); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !repo.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, repo)
}
if !controllerutil.ContainsFinalizer(repo, repositoryProtection) {
expected := repo.DeepCopy()
controllerutil.AddFinalizer(expected, repositoryProtection)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(repo))
}
return r.reconcileRepository(ctx, repo)
}
// reconcileDelete delete the repository and pod.
func (r *RepositoryReconciler) reconcileDelete(ctx context.Context, repo *corev1alpha1.Repository) (ctrl.Result, error) {
// Remove the finalizer from the subscription and update it.
controllerutil.RemoveFinalizer(repo, repositoryProtection)
if err := r.Update(ctx, repo); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// createOrUpdateExtension create a new extension if the extension does not exist.
// Or it will update info of the extension.
func (r *RepositoryReconciler) createOrUpdateExtension(ctx context.Context, repo *corev1alpha1.Repository, extensionName string, extensionVersion *corev1alpha1.ExtensionVersion) (*corev1alpha1.Extension, error) {
logger := klog.FromContext(ctx)
extension := &corev1alpha1.Extension{ObjectMeta: metav1.ObjectMeta{Name: extensionName}}
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, extension, func() error {
originRepoName := extension.Labels[corev1alpha1.RepositoryReferenceLabel]
if originRepoName != "" && originRepoName != repo.Name {
logger.Error(extensionRepoConflict, "conflict", "extension", extensionName, "want", originRepoName, "got", repo.Name)
return extensionRepoConflict
}
if extension.Labels == nil {
extension.Labels = make(map[string]string)
}
if extensionVersion.Spec.Category != "" {
extension.Labels[corev1alpha1.CategoryLabel] = extensionVersion.Spec.Category
}
extension.Labels[corev1alpha1.RepositoryReferenceLabel] = repo.Name
extension.Spec.ExtensionInfo = extensionVersion.Spec.ExtensionInfo
if err := controllerutil.SetOwnerReference(repo, extension, r.Scheme()); err != nil {
return err
}
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to update extension: %s", err)
}
logger.V(4).Info("extension successfully updated", "operation", op, "name", extension.Name)
return extension, nil
}
func (r *RepositoryReconciler) createOrUpdateExtensionVersion(ctx context.Context, extension *corev1alpha1.Extension, extensionVersion *corev1alpha1.ExtensionVersion) error {
logger := klog.FromContext(ctx)
version := &corev1alpha1.ExtensionVersion{ObjectMeta: metav1.ObjectMeta{Name: extensionVersion.Name}}
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, version, func() error {
if version.Labels == nil {
version.Labels = make(map[string]string)
}
for k, v := range extensionVersion.Labels {
version.Labels[k] = v
}
version.Spec = extensionVersion.Spec
if err := controllerutil.SetOwnerReference(extension, version, r.Scheme()); err != nil {
return err
}
return nil
})
if err != nil {
return fmt.Errorf("failed to update extension version: %s", err)
}
logger.V(4).Info("extension version successfully updated", "operation", op, "name", extensionVersion.Name)
return nil
}
func (r *RepositoryReconciler) syncExtensionsFromURL(ctx context.Context, repo *corev1alpha1.Repository, repoURL string) error {
logger := klog.FromContext(ctx)
ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
defer cancel()
cred := helm.RepoCredential{}
if repo.Spec.BasicAuth != nil {
cred.Username = repo.Spec.BasicAuth.Username
cred.Password = repo.Spec.BasicAuth.Password
}
index, err := helm.LoadRepoIndex(ctx, repoURL, cred)
if err != nil {
return err
}
for extensionName, versions := range index.Entries {
extensionVersions := make([]corev1alpha1.ExtensionVersion, 0, len(versions))
for _, version := range versions {
if version.Metadata == nil {
logger.Info("version metadata is empty", "repo", repo.Name)
continue
}
if version.Name != extensionName {
logger.Info("invalid extension version found", "want", extensionName, "got", version.Name)
continue
}
var chartURL string
if len(version.URLs) > 0 {
versionURL := version.URLs[0]
u, err := url.Parse(versionURL)
if err != nil {
logger.Error(err, "failed to parse chart URL", "url", versionURL)
continue
}
if u.Host == "" {
chartURL = fmt.Sprintf("%s/%s", repoURL, versionURL)
} else {
chartURL = u.String()
}
}
extensionVersionSpec, err := r.loadExtensionVersionSpecFrom(ctx, chartURL, repo)
if err != nil {
return fmt.Errorf("failed to load extension version spec: %s", err)
}
if extensionVersionSpec == nil {
logger.V(4).Info("extension version spec not found: %s", chartURL)
continue
}
extensionVersionSpec.Created = metav1.NewTime(version.Created)
extensionVersionSpec.Digest = version.Digest
extensionVersionSpec.Repository = repo.Name
extensionVersionSpec.ChartDataRef = nil
extensionVersionSpec.ChartURL = chartURL
extensionVersion := corev1alpha1.ExtensionVersion{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", extensionName, extensionVersionSpec.Version),
Labels: map[string]string{
corev1alpha1.RepositoryReferenceLabel: repo.Name,
corev1alpha1.ExtensionReferenceLabel: extensionName,
},
Annotations: version.Metadata.Annotations,
},
Spec: *extensionVersionSpec,
}
if extensionVersionSpec.Category != "" {
extensionVersion.Labels[corev1alpha1.CategoryLabel] = extensionVersionSpec.Category
}
extensionVersions = append(extensionVersions, extensionVersion)
}
latestExtensionVersion := getLatestExtensionVersion(extensionVersions)
if latestExtensionVersion == nil {
continue
}
extension, err := r.createOrUpdateExtension(ctx, repo, extensionName, latestExtensionVersion)
if err != nil {
if errors.Is(err, extensionRepoConflict) {
continue
}
return fmt.Errorf("failed to create or update extension: %s", err)
}
for _, extensionVersion := range extensionVersions {
if err := r.createOrUpdateExtensionVersion(ctx, extension, &extensionVersion); err != nil {
return fmt.Errorf("failed to create or update extension version: %s", err)
}
}
if err := r.removeSuspendedExtensionVersion(ctx, repo, extension, extensionVersions); err != nil {
return fmt.Errorf("failed to remove suspended extension version: %s", err)
}
}
extensions := &corev1alpha1.ExtensionList{}
if err := r.List(ctx, extensions, client.MatchingLabels{corev1alpha1.RepositoryReferenceLabel: repo.Name}); err != nil {
return fmt.Errorf("failed to list extensions: %s", err)
}
for _, extension := range extensions.Items {
if _, ok := index.Entries[extension.Name]; !ok {
if err := r.removeSuspendedExtensionVersion(ctx, repo, &extension, []corev1alpha1.ExtensionVersion{}); err != nil {
return fmt.Errorf("failed to remove suspended extension version: %s", err)
}
}
}
return nil
}
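// reconcileRepository drives the periodic sync: for image-backed repositories
// it deploys (and periodically restarts) an in-cluster chart service, then
// re-reads the repository index no more often than the configured poll
// interval, which is floored at minimumRegistryPollInterval.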
func (r *RepositoryReconciler) reconcileRepository(ctx context.Context, repo *corev1alpha1.Repository) (ctrl.Result, error) {
registryPollInterval := minimumRegistryPollInterval
if repo.Spec.UpdateStrategy != nil && repo.Spec.UpdateStrategy.Interval.Duration > minimumRegistryPollInterval {
registryPollInterval = repo.Spec.UpdateStrategy.Interval.Duration
}
var repoURL string
// URL and Image are immutable after creation
if repo.Spec.URL != "" {
repoURL = repo.Spec.URL
} else if repo.Spec.Image != "" {
var deployment appsv1.Deployment
if err := r.Get(ctx, types.NamespacedName{Namespace: constants.KubeSphereNamespace, Name: fmt.Sprintf(generateNameFormat, repo.Name)}, &deployment); err != nil {
if apierrors.IsNotFound(err) {
if err := r.deployRepository(ctx, repo); err != nil {
r.recorder.Event(repo, corev1.EventTypeWarning, "RepositoryDeployFailed", err.Error())
return ctrl.Result{}, fmt.Errorf("failed to deploy repository: %s", err)
}
r.recorder.Event(repo, corev1.EventTypeNormal, "RepositoryDeployed", "")
return ctrl.Result{Requeue: true, RequeueAfter: defaultRequeueInterval}, nil
}
return ctrl.Result{}, fmt.Errorf("failed to fetch deployment: %s", err)
}
restartAt, _ := time.Parse(time.RFC3339, deployment.Spec.Template.Annotations["kubesphere.io/restartedAt"])
if restartAt.IsZero() {
restartAt = deployment.ObjectMeta.CreationTimestamp.Time
}
// restart and pull the latest docker image
if time.Now().After(repo.Status.LastSyncTime.Add(registryPollInterval)) && time.Now().After(restartAt.Add(registryPollInterval)) {
rawData := []byte(fmt.Sprintf("{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"kubesphere.io/restartedAt\":\"%s\"}}}}}", time.Now().Format(time.RFC3339)))
if err := r.Patch(ctx, &deployment, client.RawPatch(types.StrategicMergePatchType, rawData)); err != nil {
return ctrl.Result{}, err
}
r.recorder.Event(repo, corev1.EventTypeNormal, "RepositoryRestarted", "")
return ctrl.Result{Requeue: true, RequeueAfter: defaultRequeueInterval}, nil
}
if deployment.Status.AvailableReplicas != deployment.Status.Replicas {
return ctrl.Result{Requeue: true, RequeueAfter: defaultRequeueInterval}, nil
}
// ready to sync
repoURL = fmt.Sprintf("http://%s.%s.svc", deployment.Name, constants.KubeSphereNamespace)
}
outOfSync := repo.Status.LastSyncTime == nil || time.Now().After(repo.Status.LastSyncTime.Add(registryPollInterval))
if repoURL != "" && outOfSync {
if err := r.syncExtensionsFromURL(ctx, repo, repoURL); err != nil {
r.recorder.Eventf(repo, corev1.EventTypeWarning, kscontroller.SyncFailed, "failed to sync extensions from %s: %s", repoURL, err)
return ctrl.Result{}, fmt.Errorf("failed to sync extensions: %s", err)
}
r.recorder.Eventf(repo, corev1.EventTypeNormal, kscontroller.Synced, "sync extensions from %s successfully", repoURL)
repo = repo.DeepCopy()
repo.Status.LastSyncTime = &metav1.Time{Time: time.Now()}
if err := r.Update(ctx, repo); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update repository: %s", err)
}
}
return ctrl.Result{Requeue: true, RequeueAfter: registryPollInterval}, nil
}
func (r *RepositoryReconciler) deployRepository(ctx context.Context, repo *corev1alpha1.Repository) error {
generateName := fmt.Sprintf(generateNameFormat, repo.Name)
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: generateName,
Namespace: constants.KubeSphereNamespace,
Labels: map[string]string{corev1alpha1.RepositoryReferenceLabel: repo.Name},
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{corev1alpha1.RepositoryReferenceLabel: repo.Name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{corev1alpha1.RepositoryReferenceLabel: repo.Name},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "repository",
Image: repo.Spec.Image,
ImagePullPolicy: corev1.PullAlways,
Env: []corev1.EnvVar{
{
Name: "CHART_URL",
Value: fmt.Sprintf("http://%s.%s.svc", generateName, constants.KubeSphereNamespace),
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/health",
Port: intstr.FromInt32(8080),
},
},
PeriodSeconds: 10,
InitialDelaySeconds: 5,
},
},
},
},
},
},
}
if err := controllerutil.SetOwnerReference(repo, deployment, r.Scheme()); err != nil {
return err
}
if err := r.Create(ctx, deployment); err != nil {
return err
}
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: generateName,
Namespace: constants.KubeSphereNamespace,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Port: 80,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt32(8080),
},
},
Selector: map[string]string{
corev1alpha1.RepositoryReferenceLabel: repo.Name,
},
Type: corev1.ServiceTypeClusterIP,
},
}
if err := controllerutil.SetOwnerReference(repo, service, r.Scheme()); err != nil {
return err
}
if err := r.Create(ctx, service); err != nil {
return err
}
return nil
}
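// loadExtensionVersionSpecFrom downloads the chart archive, decodes the
// bundled extension.yaml into an ExtensionVersionSpec, and inlines a relative
// icon path as a base64 data URI; transient fetch failures are retried.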
func (r *RepositoryReconciler) loadExtensionVersionSpecFrom(ctx context.Context, chartURL string, repo *corev1alpha1.Repository) (*corev1alpha1.ExtensionVersionSpec, error) {
logger := klog.FromContext(ctx)
var result *corev1alpha1.ExtensionVersionSpec
err := retry.OnError(retry.DefaultRetry, func(err error) bool {
return true
}, func() error {
req, err := http.NewRequest(http.MethodGet, chartURL, nil)
if err != nil {
return err
}
if repo.Spec.BasicAuth != nil {
req.SetBasicAuth(repo.Spec.BasicAuth.Username, repo.Spec.BasicAuth.Password)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
data, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
			return errors.New(string(data))
}
files, err := loader.LoadArchiveFiles(resp.Body)
if err != nil {
return err
}
for _, file := range files {
if file.Name == extensionFileName {
extensionVersionSpec := &corev1alpha1.ExtensionVersionSpec{}
if err := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(file.Data), 1024).Decode(extensionVersionSpec); err != nil {
logger.V(4).Info("invalid extension version spec: %s", string(file.Data))
return nil
}
result = extensionVersionSpec
break
}
}
if result == nil {
logger.V(6).Info("extension.yaml not found", "chart", chartURL)
return nil
}
if strings.HasPrefix(result.Icon, "http://") ||
strings.HasPrefix(result.Icon, "https://") ||
strings.HasPrefix(result.Icon, "data:image") {
return nil
}
absPath := strings.TrimPrefix(result.Icon, "./")
var iconData []byte
for _, file := range files {
if file.Name == absPath {
iconData = file.Data
break
}
}
if iconData == nil {
logger.V(4).Info("invalid extension icon path: %s", absPath)
return nil
}
mimeType := mime.TypeByExtension(path.Ext(result.Icon))
if mimeType == "" {
mimeType = http.DetectContentType(iconData)
}
base64EncodedData := base64.StdEncoding.EncodeToString(iconData)
result.Icon = fmt.Sprintf("data:%s;base64,%s", mimeType, base64EncodedData)
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to fetch chart data from %s: %s", chartURL, err)
}
return result, nil
}
func (r *RepositoryReconciler) removeSuspendedExtensionVersion(ctx context.Context, repo *corev1alpha1.Repository, extension *corev1alpha1.Extension, versions []corev1alpha1.ExtensionVersion) error {
extensionVersions := &corev1alpha1.ExtensionVersionList{}
if err := r.List(ctx, extensionVersions, client.MatchingLabels{corev1alpha1.ExtensionReferenceLabel: extension.Name, corev1alpha1.RepositoryReferenceLabel: repo.Name}); err != nil {
return fmt.Errorf("failed to list extension versions: %s", err)
}
for _, version := range extensionVersions.Items {
if checkIfSuspended(versions, version) {
r.logger.V(4).Info("delete suspended extension version", "name", version.Name, "version", version.Spec.Version)
if err := r.Delete(ctx, &version); err != nil {
if apierrors.IsNotFound(err) {
					continue
}
return fmt.Errorf("failed to delete extension version: %s", err)
}
}
}
return nil
}
func checkIfSuspended(versions []corev1alpha1.ExtensionVersion, version corev1alpha1.ExtensionVersion) bool {
for _, v := range versions {
if v.Name == version.Name && v.Spec.Version == version.Spec.Version {
return false
}
}
return true
}

266
pkg/controller/core/util.go Normal file
View File

@@ -0,0 +1,266 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"bytes"
goerrors "errors"
"fmt"
"io"
"sort"
"strings"
yaml3 "gopkg.in/yaml.v3"
"github.com/Masterminds/semver/v3"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/storage/driver"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/klog/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
"kubesphere.io/kubesphere/pkg/utils/hashutil"
"kubesphere.io/kubesphere/pkg/version"
)
func getRecommendedExtensionVersion(versions []corev1alpha1.ExtensionVersion, k8sVersion *semver.Version) (string, error) {
if len(versions) == 0 {
return "", nil
}
ksVersion, err := semver.NewVersion(version.Get().GitVersion)
if err != nil {
return "", fmt.Errorf("parse KubeSphere version failed: %v", err)
}
var matchedVersions []*semver.Version
for _, v := range versions {
var kubeVersionMatched, ksVersionMatched bool
if v.Spec.KubeVersion == "" {
kubeVersionMatched = true
} else {
targetKubeVersion, err := semver.NewConstraint(v.Spec.KubeVersion)
if err != nil {
// If the semver is invalid, just ignore it.
klog.Warningf("failed to parse Kubernetes version constraints: kubeVersion: %s, err: %s", v.Spec.KubeVersion, err)
continue
}
kubeVersionMatched = targetKubeVersion.Check(k8sVersion)
}
if v.Spec.KSVersion == "" {
ksVersionMatched = true
} else {
targetKSVersion, err := semver.NewConstraint(v.Spec.KSVersion)
if err != nil {
klog.Warningf("failed to parse KubeSphere version constraints: ksVersion: %s, err: %s", v.Spec.KSVersion, err)
continue
}
ksVersionMatched = targetKSVersion.Check(ksVersion)
}
if kubeVersionMatched && ksVersionMatched {
targetVersion, err := semver.NewVersion(v.Spec.Version)
if err != nil {
klog.V(2).Infof("parse version failed, extension version: %s, err: %s", v.Spec.Version, err)
continue
}
matchedVersions = append(matchedVersions, targetVersion)
}
}
if len(matchedVersions) == 0 {
return "", nil
}
sort.Slice(matchedVersions, func(i, j int) bool {
		return matchedVersions[i].GreaterThan(matchedVersions[j])
})
return matchedVersions[0].Original(), nil
}
func getLatestExtensionVersion(versions []corev1alpha1.ExtensionVersion) *corev1alpha1.ExtensionVersion {
if len(versions) == 0 {
return nil
}
var latestVersion *corev1alpha1.ExtensionVersion
var latestSemver *semver.Version
for i := range versions {
currSemver, err := semver.NewVersion(versions[i].Spec.Version)
if err == nil {
if latestSemver == nil {
// the first valid semver
latestSemver = currSemver
latestVersion = &versions[i]
} else if latestSemver.LessThan(currSemver) {
// find a newer valid semver
latestSemver = currSemver
latestVersion = &versions[i]
}
} else {
// If the semver is invalid, just ignore it.
klog.Warningf("parse version failed, extension version: %s, err: %s", versions[i].Name, err)
}
}
return latestVersion
}
func isReleaseNotFoundError(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), driver.ErrReleaseNotFound.Error())
}
func clusterConfig(sub *corev1alpha1.InstallPlan, clusterName string) []byte {
if clusterName == "" {
return []byte(sub.Spec.Config)
}
for cluster, config := range sub.Spec.ClusterScheduling.Overrides {
if cluster == clusterName {
return merge(sub.Spec.Config, config)
}
}
return []byte(sub.Spec.Config)
}
func merge(config string, override string) []byte {
config = strings.TrimSpace(config)
override = strings.TrimSpace(override)
if config == "" && override == "" {
return []byte("")
}
if override == "" {
return []byte(config)
}
if config == "" {
return []byte(override)
}
baseConf := map[string]interface{}{}
if err := yaml3.Unmarshal([]byte(config), &baseConf); err != nil {
klog.Warningf("failed to unmarshal config: %v", err)
}
overrideConf := map[string]interface{}{}
	if err := yaml3.Unmarshal([]byte(override), &overrideConf); err != nil {
klog.Warningf("failed to unmarshal config: %v", err)
}
finalConf := mergeValues(baseConf, overrideConf)
data, _ := yaml3.Marshal(finalConf)
return data
}
// mergeValues will merge source and destination map, preferring values from the source map
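// For example, merging
//	dest: {replicas: 2, image: {tag: "v1"}}
//	src:  {image: {tag: "v2"}, debug: true}
// yields {replicas: 2, image: {tag: "v2"}, debug: true}: nested maps are merged
// recursively, and conflicting scalars take the src value.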
func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} {
for k, v := range src {
// If the key doesn't exist already, then just set the key to that value
if _, exists := dest[k]; !exists {
dest[k] = v
continue
}
nextMap, ok := v.(map[string]interface{})
// If it isn't another map, overwrite the value
if !ok {
dest[k] = v
continue
}
// Edge case: If the key exists in the destination, but isn't a map
destMap, isMap := dest[k].(map[string]interface{})
// If the source map has a map for this key, prefer it
if !isMap {
dest[k] = v
continue
}
// If we got to this point, it is a map in both, so merge them
dest[k] = mergeValues(destMap, nextMap)
}
return dest
}
func usesPermissions(mainChart *chart.Chart) (rbacv1.ClusterRole, rbacv1.Role) {
var clusterRole rbacv1.ClusterRole
var role rbacv1.Role
for _, file := range mainChart.Files {
if file.Name == permissionDefinitionFile {
			decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(file.Data), 1024)
			for {
				result := new(rbacv1.Role)
				err := decoder.Decode(&result)
				// skip empty YAML documents
if result == nil {
continue
}
// break the loop in case of EOF
if goerrors.Is(err, io.EOF) {
break
}
if err != nil {
return clusterRole, role
}
if result.Kind == "ClusterRole" {
clusterRole.Rules = append(clusterRole.Rules, result.Rules...)
}
if result.Kind == "Role" {
role.Rules = append(role.Rules, result.Rules...)
}
}
}
}
return clusterRole, role
}
func hasCluster(clusters []clusterv1alpha1.Cluster, clusterName string) bool {
for _, cluster := range clusters {
if cluster.Name == clusterName {
return true
}
}
return false
}
func versionChanged(plan *corev1alpha1.InstallPlan, cluster string) bool {
var oldVersion string
if cluster == "" {
oldVersion = plan.Status.Version
} else if plan.Status.ClusterSchedulingStatuses != nil {
oldVersion = plan.Status.ClusterSchedulingStatuses[cluster].Version
}
newVersion := plan.Spec.Extension.Version
if oldVersion == "" {
return false
}
return newVersion != oldVersion
}
func configChanged(sub *corev1alpha1.InstallPlan, cluster string) bool {
var oldConfigHash string
if cluster == "" {
oldConfigHash = sub.Status.InstallationStatus.ConfigHash
} else {
oldConfigHash = sub.Status.ClusterSchedulingStatuses[cluster].ConfigHash
}
newConfigHash := hashutil.FNVString(clusterConfig(sub, cluster))
if oldConfigHash == "" {
return true
}
return newConfigHash != oldConfigHash
}

View File

@@ -0,0 +1,117 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package core
import (
"testing"
"github.com/Masterminds/semver/v3"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
"kubesphere.io/kubesphere/pkg/version"
)
func TestGetRecommendedExtensionVersion(t *testing.T) {
k8sVersion120, _ := semver.NewVersion("1.20.0")
k8sVersion125, _ := semver.NewVersion("1.25.4")
tests := []struct {
name string
versions []corev1alpha1.ExtensionVersion
k8sVersion *semver.Version
ksVersion string
wanted string
}{
{
name: "normal test",
versions: []corev1alpha1.ExtensionVersion{
{
Spec: corev1alpha1.ExtensionVersionSpec{ // match
Version: "1.0.0",
KubeVersion: ">=1.19.0",
KSVersion: ">=4.0.0",
},
},
{
Spec: corev1alpha1.ExtensionVersionSpec{ // match
Version: "1.1.0",
KubeVersion: ">=1.20.0",
KSVersion: ">=4.0.0",
},
},
{
Spec: corev1alpha1.ExtensionVersionSpec{ // KubeVersion not match
Version: "1.2.0",
KubeVersion: ">=1.21.0",
KSVersion: ">=4.0.0",
},
},
{
Spec: corev1alpha1.ExtensionVersionSpec{ // KSVersion not match
Version: "1.3.0",
KubeVersion: ">=1.20.0",
KSVersion: ">=4.1.0",
},
},
},
k8sVersion: k8sVersion120,
ksVersion: "4.0.0",
wanted: "1.1.0",
},
{
name: "no matches test",
versions: []corev1alpha1.ExtensionVersion{
{
Spec: corev1alpha1.ExtensionVersionSpec{ // KubeVersion not match
Version: "1.2.0",
KubeVersion: ">=1.21.0",
KSVersion: ">=4.0.0",
},
},
{
Spec: corev1alpha1.ExtensionVersionSpec{ // KSVersion not match
Version: "1.3.0",
KubeVersion: ">=1.20.0",
KSVersion: ">=4.1.0",
},
},
},
k8sVersion: k8sVersion120,
ksVersion: "4.0.0",
wanted: "",
},
{
name: "match 1.3.0",
versions: []corev1alpha1.ExtensionVersion{
{
Spec: corev1alpha1.ExtensionVersionSpec{
Version: "1.2.0",
KubeVersion: ">=1.19.0",
KSVersion: ">=3.0.0",
},
},
{
Spec: corev1alpha1.ExtensionVersionSpec{
Version: "1.3.0",
KubeVersion: ">=1.19.0",
KSVersion: ">=4.0.0-alpha",
},
},
},
k8sVersion: k8sVersion125,
ksVersion: "4.0.0-beta.5+ae34",
wanted: "1.3.0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version.SetGitVersion(tt.ksVersion)
if got, _ := getRecommendedExtensionVersion(tt.versions, tt.k8sVersion); got != tt.wanted {
t.Errorf("getRecommendedExtensionVersion() = %v, want %v", got, tt.wanted)
}
})
}
}

View File

@@ -1,498 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package destinationrule
import (
"context"
"fmt"
"reflect"
"time"
apinetworkingv1alpha3 "istio.io/api/networking/v1alpha3"
clientgonetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
istioclientset "istio.io/client-go/pkg/clientset/versioned"
istioinformers "istio.io/client-go/pkg/informers/externalversions/networking/v1alpha3"
istiolisters "istio.io/client-go/pkg/listers/networking/v1alpha3"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
informersv1 "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
listersv1 "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
servicemeshv1alpha2 "kubesphere.io/api/servicemesh/v1alpha2"
servicemeshclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2"
servicemeshlisters "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
)
const (
// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
// sequence of delays between successive queuings of a service.
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
)
type DestinationRuleController struct {
client clientset.Interface
destinationRuleClient istioclientset.Interface
servicemeshClient servicemeshclient.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
serviceLister corelisters.ServiceLister
serviceSynced cache.InformerSynced
deploymentLister listersv1.DeploymentLister
deploymentSynced cache.InformerSynced
servicePolicyLister servicemeshlisters.ServicePolicyLister
servicePolicySynced cache.InformerSynced
destinationRuleLister istiolisters.DestinationRuleLister
destinationRuleSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
workerLoopPeriod time.Duration
}
func NewDestinationRuleController(deploymentInformer informersv1.DeploymentInformer,
destinationRuleInformer istioinformers.DestinationRuleInformer,
serviceInformer coreinformers.ServiceInformer,
servicePolicyInformer servicemeshinformers.ServicePolicyInformer,
client clientset.Interface,
destinationRuleClient istioclientset.Interface,
servicemeshClient servicemeshclient.Interface) *DestinationRuleController {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(func(format string, args ...interface{}) {
		klog.Info(fmt.Sprintf(format, args...))
})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "destinationrule-controller"})
v := &DestinationRuleController{
client: client,
destinationRuleClient: destinationRuleClient,
servicemeshClient: servicemeshClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "destinationrule"),
workerLoopPeriod: time.Second,
}
v.deploymentLister = deploymentInformer.Lister()
v.deploymentSynced = deploymentInformer.Informer().HasSynced
deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.addDeployment,
DeleteFunc: v.deleteDeployment,
UpdateFunc: func(old, cur interface{}) {
v.addDeployment(cur)
},
})
v.serviceLister = serviceInformer.Lister()
v.serviceSynced = serviceInformer.Informer().HasSynced
serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.enqueueService,
DeleteFunc: v.enqueueService,
UpdateFunc: func(old, cur interface{}) {
v.enqueueService(cur)
},
})
v.destinationRuleLister = destinationRuleInformer.Lister()
v.destinationRuleSynced = destinationRuleInformer.Informer().HasSynced
v.servicePolicyLister = servicePolicyInformer.Lister()
v.servicePolicySynced = servicePolicyInformer.Informer().HasSynced
servicePolicyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.addServicePolicy,
UpdateFunc: func(old, cur interface{}) {
v.addServicePolicy(cur)
},
DeleteFunc: v.addServicePolicy,
})
v.eventBroadcaster = broadcaster
v.eventRecorder = recorder
return v
}
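// A typical wiring of this controller (a sketch assuming shared informer
// factories and clientsets are constructed elsewhere, mirroring the test
// fixture in this package):
//
//	c := NewDestinationRuleController(
//		kubeInformers.Apps().V1().Deployments(),
//		istioInformers.Networking().V1alpha3().DestinationRules(),
//		kubeInformers.Core().V1().Services(),
//		servicemeshInformers.Servicemesh().V1alpha2().ServicePolicies(),
//		kubeClient, istioClient, servicemeshClient)
//	go c.Start(ctx)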
func (v *DestinationRuleController) Start(ctx context.Context) error {
return v.Run(5, ctx.Done())
}
func (v *DestinationRuleController) Run(workers int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer v.queue.ShutDown()
klog.Info("starting destinationrule controller")
defer klog.Info("shutting down destinationrule controller")
if !cache.WaitForCacheSync(stopCh, v.serviceSynced, v.destinationRuleSynced, v.deploymentSynced, v.servicePolicySynced) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < workers; i++ {
go wait.Until(v.worker, v.workerLoopPeriod, stopCh)
}
<-stopCh
return nil
}
func (v *DestinationRuleController) enqueueService(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
return
}
v.queue.Add(key)
}
func (v *DestinationRuleController) worker() {
for v.processNextWorkItem() {
}
}
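// processNextWorkItem pops one service key off the queue, syncs it, and
// reports whether the worker loop should keep running.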
func (v *DestinationRuleController) processNextWorkItem() bool {
eKey, quit := v.queue.Get()
if quit {
return false
}
defer v.queue.Done(eKey)
err := v.syncService(eKey.(string))
v.handleErr(err, eKey)
return true
}
// syncService is the main reconcile function for destinationrules.
// A destinationrule shares its name with the service that owns it.
func (v *DestinationRuleController) syncService(key string) error {
startTime := time.Now()
defer func() {
klog.V(4).Infof("Finished syncing service destinationrule %s in %s.", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
service, err := v.serviceLister.Services(namespace).Get(name)
if err != nil {
// delete the corresponding destinationrule if there is any, as the service has been deleted.
err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Delete(context.Background(), name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("delete destination rule failed %s/%s, error %v.", namespace, name, err)
return err
}
// delete orphan service policy if there is any
err = v.servicemeshClient.ServicemeshV1alpha2().ServicePolicies(namespace).Delete(context.Background(), name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("delete orphan service policy %s/%s failed, %#v", namespace, name, err)
return err
}
return nil
}
if len(service.Labels) < len(servicemesh.ApplicationLabels) ||
!servicemesh.IsApplicationComponent(service.Labels) ||
!servicemesh.IsServicemeshEnabled(service.Annotations) ||
len(service.Spec.Ports) == 0 {
// skip services that lack the required application labels, are not
// application components, don't have servicemesh enabled, or define no ports
return nil
}
appName := servicemesh.GetComponentName(&service.ObjectMeta)
// fetch all deployments that match with service selector
deployments, err := v.deploymentLister.Deployments(namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
if err != nil {
return err
}
subsets := make([]*apinetworkingv1alpha3.Subset, 0)
for _, deployment := range deployments {
// skip deployments that are not ready, servicemesh-enabled application components
if !servicemesh.IsApplicationComponent(deployment.Labels) ||
!servicemesh.IsApplicationComponent(deployment.Spec.Selector.MatchLabels) ||
deployment.Status.ReadyReplicas == 0 ||
!servicemesh.IsServicemeshEnabled(deployment.Annotations) {
continue
}
version := servicemesh.GetComponentVersion(&deployment.ObjectMeta)
if len(version) == 0 {
klog.V(4).Infof("Deployment %s doesn't have a version label", types.NamespacedName{Namespace: deployment.Namespace, Name: deployment.Name}.String())
continue
}
subset := &apinetworkingv1alpha3.Subset{
Name: servicemesh.NormalizeVersionName(version),
Labels: map[string]string{
servicemesh.VersionLabel: version,
},
}
subsets = append(subsets, subset)
}
currentDestinationRule, err := v.destinationRuleLister.DestinationRules(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
currentDestinationRule = &clientgonetworkingv1alpha3.DestinationRule{
ObjectMeta: metav1.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
Spec: apinetworkingv1alpha3.DestinationRule{
Host: name,
},
}
} else {
klog.Error(err, "Couldn't get destinationrule for service", "key", key)
return err
}
}
// fetch all servicepolicies associated to this service
servicePolicies, err := v.servicePolicyLister.ServicePolicies(namespace).List(labels.SelectorFromSet(map[string]string{servicemesh.AppLabel: appName}))
if err != nil {
klog.Error(err, "could not list service policies is namespace with component name", "namespace", namespace, "name", appName)
return err
}
dr := currentDestinationRule.DeepCopy()
dr.Spec.TrafficPolicy = nil
dr.Spec.Subsets = subsets
// apply traffic policy overrides from the associated servicepolicy, if any
if len(servicePolicies) > 0 {
if len(servicePolicies) > 1 {
err = fmt.Errorf("more than one service policy associated with service %s/%s is forbidden", namespace, name)
klog.Error(err, "")
return err
}
sp := servicePolicies[0]
if sp.Spec.Template.Spec.TrafficPolicy != nil {
dr.Spec.TrafficPolicy = sp.Spec.Template.Spec.TrafficPolicy
}
for _, subset := range sp.Spec.Template.Spec.Subsets {
for i := range dr.Spec.Subsets {
if subset.Name == dr.Spec.Subsets[i].Name && subset.TrafficPolicy != nil {
dr.Spec.Subsets[i].TrafficPolicy = subset.TrafficPolicy
}
}
}
}
createDestinationRule := len(currentDestinationRule.ResourceVersion) == 0
if !createDestinationRule && reflect.DeepEqual(currentDestinationRule.Spec, dr.Spec) &&
reflect.DeepEqual(currentDestinationRule.Labels, service.Labels) {
klog.V(5).Info("destinationrule are equal, skipping update", "key", types.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String())
return nil
}
newDestinationRule := currentDestinationRule.DeepCopy()
newDestinationRule.Spec = dr.Spec
newDestinationRule.Labels = service.Labels
if newDestinationRule.Annotations == nil {
newDestinationRule.Annotations = make(map[string]string)
}
if createDestinationRule {
_, err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Create(context.Background(), newDestinationRule, metav1.CreateOptions{})
} else {
_, err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Update(context.Background(), newDestinationRule, metav1.UpdateOptions{})
}
if err != nil {
if createDestinationRule && errors.IsForbidden(err) {
// A request is forbidden primarily for two reasons:
// 1. the namespace is terminating, so object creation is not allowed by default.
// 2. policy is misconfigured, in which case no service would function anywhere.
// Given the frequency of 1, we log at a lower level.
klog.V(5).Info("Forbidden from creating destinationrule", "error", err)
}
if createDestinationRule {
v.eventRecorder.Event(newDestinationRule, v1.EventTypeWarning, "FailedToCreateDestinationRule", fmt.Sprintf("Failed to create destinationrule for service %v/%v: %v", service.Namespace, service.Name, err))
} else {
v.eventRecorder.Event(newDestinationRule, v1.EventTypeWarning, "FailedToUpdateDestinationRule", fmt.Sprintf("Failed to update destinationrule for service %v/%v: %v", service.Namespace, service.Name, err))
}
return err
}
return nil
}
// When a deployment is added, figure out which services select it
// and enqueue them. obj must be of type *appsv1.Deployment.
func (v *DestinationRuleController) addDeployment(obj interface{}) {
deploy := obj.(*appsv1.Deployment)
// not an application component
if !servicemesh.IsApplicationComponent(deploy.Labels) || !servicemesh.IsApplicationComponent(deploy.Spec.Selector.MatchLabels) {
return
}
services, err := v.getDeploymentServiceMemberShip(deploy)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to get deployment %s/%s's service memberships", deploy.Namespace, deploy.Name))
return
}
for key := range services {
v.queue.Add(key)
}
}
func (v *DestinationRuleController) deleteDeployment(obj interface{}) {
if _, ok := obj.(*appsv1.Deployment); ok {
v.addDeployment(obj)
return
}
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
return
}
deploy, ok := tombstone.Obj.(*appsv1.Deployment)
if !ok {
utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a deployment %#v", obj))
return
}
v.addDeployment(deploy)
}
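// getDeploymentServiceMemberShip returns the keys of all servicemesh-enabled
// services in the deployment's namespace whose selectors match the
// deployment's labels.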
func (v *DestinationRuleController) getDeploymentServiceMemberShip(deployment *appsv1.Deployment) (sets.Set[string], error) {
set := sets.New[string]()
allServices, err := v.serviceLister.Services(deployment.Namespace).List(labels.Everything())
if err != nil {
return set, err
}
for i := range allServices {
service := allServices[i]
if service.Spec.Selector == nil ||
!servicemesh.IsApplicationComponent(service.Labels) ||
!servicemesh.IsServicemeshEnabled(service.Annotations) {
// services with nil selectors match nothing, not everything.
continue
}
selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
if selector.Matches(labels.Set(deployment.Spec.Selector.MatchLabels)) {
key, err := cache.MetaNamespaceKeyFunc(service)
if err != nil {
return nil, err
}
set.Insert(key)
}
}
return set, nil
}
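// addServicePolicy enqueues every service sharing the servicepolicy's app
// label; it is registered for add, update, and delete events alike.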
func (v *DestinationRuleController) addServicePolicy(obj interface{}) {
servicePolicy := obj.(*servicemeshv1alpha2.ServicePolicy)
appName := servicePolicy.Labels[servicemesh.AppLabel]
services, err := v.serviceLister.Services(servicePolicy.Namespace).List(labels.SelectorFromSet(map[string]string{servicemesh.AppLabel: appName}))
if err != nil {
klog.Error(err, "cannot list services", "namespace", servicePolicy.Namespace, "name", appName)
utilruntime.HandleError(fmt.Errorf("cannot list services in namespace %s, with component name %v", servicePolicy.Namespace, appName))
return
}
set := sets.New[string]()
for _, service := range services {
key, err := cache.MetaNamespaceKeyFunc(service)
if err != nil {
utilruntime.HandleError(err)
continue
}
set.Insert(key)
}
// avoid enqueueing the same key multiple times
for key := range set {
v.queue.Add(key)
}
}
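// handleErr requeues a key with rate limiting until maxRetries is exceeded,
// then drops it from the queue.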
func (v *DestinationRuleController) handleErr(err error, key interface{}) {
if err == nil {
v.queue.Forget(key)
return
}
if v.queue.NumRequeues(key) < maxRetries {
klog.V(2).Info("Error syncing virtualservice for service, retrying.", "key", key, "error", err)
v.queue.AddRateLimited(key)
return
}
klog.V(4).Info("Dropping service out of the queue", "key", key, "error", err)
v.queue.Forget(key)
utilruntime.HandleError(err)
}

View File

@@ -1,383 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package destinationrule
import (
"context"
"fmt"
"testing"
apiv1alpha3 "istio.io/api/networking/v1alpha3"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
istioinformers "istio.io/client-go/pkg/informers/externalversions"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"kubesphere.io/api/servicemesh/v1alpha2"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
)
var (
alwaysReady = func() bool { return true }
replicas = int32(2)
)
func newDeployments(service *corev1.Service, version string) *appsv1.Deployment {
// copy the labels so the service's shared label map is not mutated across calls
lbs := map[string]string{"version": version}
for k, v := range service.Labels {
lbs[k] = v
}
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", service.Name, version),
Namespace: metav1.NamespaceDefault,
Labels: lbs,
Annotations: service.Annotations,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: lbs,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: lbs,
Annotations: service.Annotations,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c1",
Image: "nginx:latest",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
ContainerPort: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
ContainerPort: 3306,
Protocol: corev1.ProtocolTCP,
},
},
},
},
},
},
},
Status: appsv1.DeploymentStatus{
AvailableReplicas: replicas,
ReadyReplicas: replicas,
Replicas: replicas,
},
}
return deployment
}
func newDestinationRule(service *corev1.Service, deployments ...*appsv1.Deployment) *v1alpha3.DestinationRule {
dr := &v1alpha3.DestinationRule{
ObjectMeta: metav1.ObjectMeta{
Name: service.Name,
Namespace: service.Namespace,
Labels: service.Labels,
Annotations: make(map[string]string),
},
Spec: apiv1alpha3.DestinationRule{
Host: service.Name,
},
}
dr.Spec.Subsets = []*apiv1alpha3.Subset{}
for _, deployment := range deployments {
subset := &apiv1alpha3.Subset{
Name: servicemesh.GetComponentVersion(&deployment.ObjectMeta),
Labels: map[string]string{
"version": servicemesh.GetComponentVersion(&deployment.ObjectMeta),
},
}
dr.Spec.Subsets = append(dr.Spec.Subsets, subset)
}
return dr
}
func newService(name string) *corev1.Service {
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
"app.kubernetes.io/name": "bookinfo",
"app.kubernetes.io/version": "1",
"app": "foo",
},
Annotations: map[string]string{
"servicemesh.kubesphere.io/enabled": "true",
},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
Port: 3306,
Protocol: corev1.ProtocolTCP,
},
},
Selector: map[string]string{
"app.kubernetes.io/name": "bookinfo",
"app.kubernetes.io/version": "1",
"app": "foo",
},
Type: corev1.ServiceTypeClusterIP,
},
Status: corev1.ServiceStatus{},
}
return service
}
func newServicePolicy(name string, service *corev1.Service, deployments ...*appsv1.Deployment) *v1alpha2.ServicePolicy {
sp := &v1alpha2.ServicePolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
Labels: service.Labels,
Annotations: service.Annotations,
},
Spec: v1alpha2.ServicePolicySpec{
Template: v1alpha2.DestinationRuleSpecTemplate{
Spec: apiv1alpha3.DestinationRule{
Host: service.Name,
},
},
},
}
sp.Spec.Template.Spec.Subsets = []*apiv1alpha3.Subset{}
for _, deployment := range deployments {
subset := &apiv1alpha3.Subset{
Name: servicemesh.GetComponentVersion(&deployment.ObjectMeta),
Labels: map[string]string{
"version": servicemesh.GetComponentVersion(&deployment.ObjectMeta),
},
}
sp.Spec.Template.Spec.Subsets = append(sp.Spec.Template.Spec.Subsets, subset)
}
return sp
}
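// fixture bundles the fake clientsets, informer stores, and seed objects
// used to drive the controller in tests.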
type fixture struct {
t testing.TB
kubeClient *kubefake.Clientset
istioClient *istiofake.Clientset
servicemeshClient *fake.Clientset
serviceLister []*corev1.Service
deploymentLister []*appsv1.Deployment
drLister []*v1alpha3.DestinationRule
spLister []*v1alpha2.ServicePolicy
kubeObjects []runtime.Object
istioObjects []runtime.Object
servicemeshObjects []runtime.Object
}
func newFixture(t testing.TB) *fixture {
f := &fixture{}
f.t = t
f.kubeObjects = []runtime.Object{}
f.istioObjects = []runtime.Object{}
f.servicemeshObjects = []runtime.Object{}
return f
}
func (f *fixture) newController() (*DestinationRuleController, kubeinformers.SharedInformerFactory, istioinformers.SharedInformerFactory, informers.SharedInformerFactory, error) {
f.kubeClient = kubefake.NewSimpleClientset(f.kubeObjects...)
f.servicemeshClient = fake.NewSimpleClientset(f.servicemeshObjects...)
f.istioClient = istiofake.NewSimpleClientset(f.istioObjects...)
kubeInformers := kubeinformers.NewSharedInformerFactory(f.kubeClient, 0)
istioInformers := istioinformers.NewSharedInformerFactory(f.istioClient, 0)
servicemeshInformers := informers.NewSharedInformerFactory(f.servicemeshClient, 0)
c := NewDestinationRuleController(kubeInformers.Apps().V1().Deployments(),
istioInformers.Networking().V1alpha3().DestinationRules(),
kubeInformers.Core().V1().Services(),
servicemeshInformers.Servicemesh().V1alpha2().ServicePolicies(),
f.kubeClient,
f.istioClient,
f.servicemeshClient)
c.eventRecorder = &record.FakeRecorder{}
c.destinationRuleSynced = alwaysReady
c.deploymentSynced = alwaysReady
c.servicePolicySynced = alwaysReady
c.serviceSynced = alwaysReady
for _, s := range f.serviceLister {
kubeInformers.Core().V1().Services().Informer().GetIndexer().Add(s)
}
for _, d := range f.drLister {
istioInformers.Networking().V1alpha3().DestinationRules().Informer().GetIndexer().Add(d)
}
for _, d := range f.deploymentLister {
kubeInformers.Apps().V1().Deployments().Informer().GetIndexer().Add(d)
}
for _, s := range f.spLister {
servicemeshInformers.Servicemesh().V1alpha2().ServicePolicies().Informer().GetIndexer().Add(s)
}
return c, kubeInformers, istioInformers, servicemeshInformers, nil
}
func (f *fixture) run(service *corev1.Service, expected *v1alpha3.DestinationRule, startInformers bool, expectedError bool) {
c, kubeInformers, istioInformers, servicemeshInformers, err := f.newController()
if err != nil {
f.t.Fatal(err)
}
if startInformers {
stopCh := make(chan struct{})
defer close(stopCh)
kubeInformers.Start(stopCh)
istioInformers.Start(stopCh)
servicemeshInformers.Start(stopCh)
}
key, err := cache.MetaNamespaceKeyFunc(service)
if err != nil {
f.t.Fatal(err)
}
err = c.syncService(key)
if !expectedError && err != nil {
f.t.Fatalf("error syncing service: %v", err)
} else if expectedError && err == nil {
f.t.Fatal("expected error syncing service, got nil")
}
got, err := c.destinationRuleClient.NetworkingV1alpha3().DestinationRules(service.Namespace).Get(context.Background(), service.Name, metav1.GetOptions{})
if err != nil {
f.t.Fatal(err)
}
if unequals := reflectutils.Equal(got, expected); len(unequals) != 0 {
f.t.Errorf("expected %#v, got %#v, unequal fields:", expected, got)
for _, unequal := range unequals {
f.t.Error(unequal)
}
}
}
func runServicePolicy(t *testing.T, service *corev1.Service, sp *v1alpha2.ServicePolicy, expected *v1alpha3.DestinationRule, expectedError bool, deployments ...*appsv1.Deployment) {
f := newFixture(t)
f.kubeObjects = append(f.kubeObjects, service)
f.serviceLister = append(f.serviceLister, service)
for _, deployment := range deployments {
f.kubeObjects = append(f.kubeObjects, deployment)
f.deploymentLister = append(f.deploymentLister, deployment)
}
if sp != nil {
f.servicemeshObjects = append(f.servicemeshObjects, sp)
f.spLister = append(f.spLister, sp)
}
f.run(service, expected, true, expectedError)
}
func TestServicePolicy(t *testing.T) {
defaultService := newService("foo")
defaultDeploymentV1 := newDeployments(defaultService, "v1")
defaultDeploymentV2 := newDeployments(defaultService, "v2")
defaultServicePolicy := newServicePolicy("foo", defaultService, defaultDeploymentV1, defaultDeploymentV2)
defaultExpected := newDestinationRule(defaultService, defaultDeploymentV1, defaultDeploymentV2)
t.Run("should create default destination rule", func(t *testing.T) {
runServicePolicy(t, defaultService, nil, defaultExpected, false, defaultDeploymentV1, defaultDeploymentV2)
})
t.Run("should create destination rule only to v1", func(t *testing.T) {
deploymentV2 := defaultDeploymentV2.DeepCopy()
deploymentV2.Status.AvailableReplicas = 0
deploymentV2.Status.ReadyReplicas = 0
expected := defaultExpected.DeepCopy()
expected.Spec.Subsets = expected.Spec.Subsets[:1]
runServicePolicy(t, defaultService, nil, expected, false, defaultDeploymentV1, deploymentV2)
})
t.Run("should create destination rule match service policy", func(t *testing.T) {
sp := defaultServicePolicy.DeepCopy()
sp.Spec.Template.Spec.TrafficPolicy = &apiv1alpha3.TrafficPolicy{
LoadBalancer: &apiv1alpha3.LoadBalancerSettings{
LbPolicy: &apiv1alpha3.LoadBalancerSettings_Simple{
Simple: apiv1alpha3.LoadBalancerSettings_ROUND_ROBIN,
},
},
ConnectionPool: &apiv1alpha3.ConnectionPoolSettings{
Http: &apiv1alpha3.ConnectionPoolSettings_HTTPSettings{
Http1MaxPendingRequests: 10,
Http2MaxRequests: 20,
MaxRequestsPerConnection: 5,
MaxRetries: 4,
},
},
OutlierDetection: &apiv1alpha3.OutlierDetection{
ConsecutiveErrors: 5,
MaxEjectionPercent: 10,
MinHealthPercent: 20,
},
}
expected := defaultExpected.DeepCopy()
expected.Spec.TrafficPolicy = sp.Spec.Template.Spec.TrafficPolicy
runServicePolicy(t, defaultService, sp, expected, false, defaultDeploymentV1, defaultDeploymentV2)
})
}

View File

@@ -0,0 +1,66 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package extension
import (
"context"
"fmt"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
extensionsv1alpha1 "kubesphere.io/api/extensions/v1alpha1"
)
var _ admission.CustomValidator = &APIServiceWebhook{}
var _ kscontroller.Controller = &APIServiceWebhook{}
func (r *APIServiceWebhook) Name() string {
return "apiservice-webhook"
}
type APIServiceWebhook struct {
client.Client
}
func (r *APIServiceWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return r.validateAPIService(ctx, obj.(*extensionsv1alpha1.APIService))
}
func (r *APIServiceWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
return r.validateAPIService(ctx, newObj.(*extensionsv1alpha1.APIService))
}
func (r *APIServiceWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
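// validateAPIService rejects an APIService whose group/version pair is
// already registered by another APIService.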
func (r *APIServiceWebhook) validateAPIService(ctx context.Context, service *extensionsv1alpha1.APIService) (admission.Warnings, error) {
apiServices := &extensionsv1alpha1.APIServiceList{}
if err := r.Client.List(ctx, apiServices, &client.ListOptions{}); err != nil {
return nil, err
}
for _, apiService := range apiServices.Items {
if apiService.Name != service.Name &&
apiService.Spec.Group == service.Spec.Group &&
apiService.Spec.Version == service.Spec.Version {
return nil, fmt.Errorf("APIService %s/%s is already exists", service.Spec.Group, service.Spec.Version)
}
}
return nil, nil
}
func (r *APIServiceWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(r).
For(&extensionsv1alpha1.APIService{}).
Complete()
}

View File

@@ -0,0 +1,131 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package extension
import (
"context"
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
extensionsv1alpha1 "kubesphere.io/api/extensions/v1alpha1"
)
var _ admission.CustomValidator = &ExtensionEntryWebhook{}
var _ kscontroller.Controller = &ExtensionEntryWebhook{}
func (r *ExtensionEntryWebhook) Name() string {
return "extensionentry-webhook"
}
type ExtensionEntryWebhook struct {
client.Client
}
func (r *ExtensionEntryWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return r.validateExtensionEntry(ctx, obj.(*extensionsv1alpha1.ExtensionEntry))
}
func (r *ExtensionEntryWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
return r.validateExtensionEntry(ctx, newObj.(*extensionsv1alpha1.ExtensionEntry))
}
func (r *ExtensionEntryWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
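// validateExtensionEntry enforces that entry names and links are unique,
// both within this ExtensionEntry and across all others in the cluster.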
func (r *ExtensionEntryWebhook) validateExtensionEntry(ctx context.Context, extensionEntry *extensionsv1alpha1.ExtensionEntry) (admission.Warnings, error) {
entryNameSet := sets.NewString()
entryLinkSet := sets.NewString()
for index, entry := range extensionEntry.Spec.Entries {
entryProps := make(map[string]interface{})
if err := json.Unmarshal(entry.Raw, &entryProps); err != nil {
return nil, err
}
entryNameVal, ok := entryProps["name"]
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].name cannot be empty", extensionEntry.Name, index)
}
entryName, ok := entryNameVal.(string)
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].name %s must be string", extensionEntry.Name, index, entryName)
}
if entryNameSet.Has(entryName) {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].name %s is duplicated", extensionEntry.Name, index, entryName)
}
entryNameSet.Insert(entryName)
entryLinkVal, ok := entryProps["link"]
if !ok {
continue
}
entryLink, ok := entryLinkVal.(string)
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].link %s must be string", extensionEntry.Name, index, entryName)
}
if entryLinkSet.Has(entryLink) {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].link %s is duplicated", extensionEntry.Name, index, entryLink)
}
entryLinkSet.Insert(entryLink)
}
extensionEntries := &extensionsv1alpha1.ExtensionEntryList{}
if err := r.Client.List(ctx, extensionEntries, &client.ListOptions{}); err != nil {
return nil, err
}
for _, target := range extensionEntries.Items {
if target.Name == extensionEntry.Name {
continue
}
for index, targetEntry := range target.Spec.Entries {
entryProps := make(map[string]interface{})
if err := json.Unmarshal(targetEntry.Raw, &entryProps); err != nil {
return nil, err
}
entryNameVal, ok := entryProps["name"]
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].name cannot be empty", target.Name, index)
}
entryName, ok := entryNameVal.(string)
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].name %s must be string", extensionEntry.Name, index, entryName)
}
if entryNameSet.Has(entryName) {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[].name %s is already exists", extensionEntry.Name, entryName)
}
entryLinkVal, ok := entryProps["link"]
if !ok {
continue
}
entryLink, ok := entryLinkVal.(string)
if !ok {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[%d].link %s must be string", extensionEntry.Name, index, entryName)
}
if entryLinkSet.Has(entryLink) {
return nil, fmt.Errorf("ExtensionEntry %s spec.entries[].link %s is already exists", extensionEntry.Name, entryLink)
}
}
}
return nil, nil
}
func (r *ExtensionEntryWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(r).
For(&extensionsv1alpha1.ExtensionEntry{}).
Complete()
}

View File

@@ -0,0 +1,93 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package extension
import (
"context"
"fmt"
"strings"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"kubesphere.io/api/core/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
extensionsv1alpha1 "kubesphere.io/api/extensions/v1alpha1"
)
var _ admission.CustomValidator = &JSBundleWebhook{}
var _ admission.CustomDefaulter = &JSBundleWebhook{}
var _ kscontroller.Controller = &JSBundleWebhook{}
type JSBundleWebhook struct {
client.Client
}
func (r *JSBundleWebhook) Name() string {
return "jsbundle-webhook"
}
func (r *JSBundleWebhook) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *JSBundleWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(r).
WithDefaulter(r).
For(&extensionsv1alpha1.JSBundle{}).
Complete()
}
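// Default fills in status.link for JSBundles that carry an extension
// reference label but no link yet.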
func (r *JSBundleWebhook) Default(_ context.Context, obj runtime.Object) error {
jsBundle := obj.(*extensionsv1alpha1.JSBundle)
extensionName := jsBundle.Labels[v1alpha1.ExtensionReferenceLabel]
if jsBundle.Status.Link == "" && extensionName != "" {
jsBundle.Status.Link = fmt.Sprintf("/dist/%s/index.js", extensionName)
}
return nil
}
func (r *JSBundleWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return r.validateJSBundle(ctx, obj.(*extensionsv1alpha1.JSBundle))
}
func (r *JSBundleWebhook) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) {
return r.validateJSBundle(ctx, newObj.(*extensionsv1alpha1.JSBundle))
}
func (r *JSBundleWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
return nil, nil
}
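// validateJSBundle checks that status.link stays under the owning extension's
// /dist/<extension> prefix and does not collide with another JSBundle's link.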
func (r *JSBundleWebhook) validateJSBundle(ctx context.Context, jsBundle *extensionsv1alpha1.JSBundle) (admission.Warnings, error) {
if jsBundle.Status.Link == "" {
return nil, nil
}
extensionName := jsBundle.Labels[v1alpha1.ExtensionReferenceLabel]
if extensionName != "" && !strings.HasPrefix(jsBundle.Status.Link, fmt.Sprintf("/dist/%s", extensionName)) {
return nil, fmt.Errorf("the prefix of status.link must be in the format /dist/%s/", extensionName)
}
jsBundles := &extensionsv1alpha1.JSBundleList{}
if err := r.Client.List(ctx, jsBundles, &client.ListOptions{}); err != nil {
return nil, err
}
for _, item := range jsBundles.Items {
if item.Name != jsBundle.Name &&
item.Status.Link == jsBundle.Status.Link {
return nil, fmt.Errorf("JSBundle %s is already exists", jsBundle.Status.Link)
}
}
return nil, nil
}

View File

@@ -0,0 +1,76 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package extension
import (
"context"
"fmt"
"strings"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
extensionsv1alpha1 "kubesphere.io/api/extensions/v1alpha1"
)
var _ admission.CustomValidator = &ReverseProxyWebhook{}
var _ kscontroller.Controller = &ReverseProxyWebhook{}
func (r *ReverseProxyWebhook) Name() string {
return "reverseproxy-webhook"
}
type ReverseProxyWebhook struct {
client.Client
}
func (r *ReverseProxyWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewWebhookManagedBy(mgr).
WithValidator(r).
For(&extensionsv1alpha1.ReverseProxy{}).
Complete()
}
func (r *ReverseProxyWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return r.validateReverseProxy(ctx, obj.(*extensionsv1alpha1.ReverseProxy))
}
func (r *ReverseProxyWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
return r.validateReverseProxy(ctx, newObj.(*extensionsv1alpha1.ReverseProxy))
}
func (r *ReverseProxyWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
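// validateReverseProxy rejects a ReverseProxy whose method/path matcher
// overlaps an existing one, including wildcard path prefixes.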
func (r *ReverseProxyWebhook) validateReverseProxy(ctx context.Context, proxy *extensionsv1alpha1.ReverseProxy) (admission.Warnings, error) {
reverseProxies := &extensionsv1alpha1.ReverseProxyList{}
if err := r.Client.List(ctx, reverseProxies, &client.ListOptions{}); err != nil {
return nil, err
}
for _, reverseProxy := range reverseProxies.Items {
if reverseProxy.Name == proxy.Name {
continue
}
if reverseProxy.Spec.Matcher.Method != proxy.Spec.Matcher.Method &&
reverseProxy.Spec.Matcher.Method != "*" {
continue
}
if reverseProxy.Spec.Matcher.Path == proxy.Spec.Matcher.Path {
return nil, fmt.Errorf("ReverseProxy %v is already exists", proxy.Spec.Matcher)
}
if strings.HasSuffix(reverseProxy.Spec.Matcher.Path, "*") &&
strings.HasPrefix(proxy.Spec.Matcher.Path, strings.TrimRight(reverseProxy.Spec.Matcher.Path, "*")) {
return nil, fmt.Errorf("ReverseProxy %v is already exists", proxy.Spec.Matcher)
}
}
return nil, nil
}

View File

@@ -1,358 +1,221 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */
package globalrole
import (
"context"
"encoding/json"
"fmt"
"reflect"
"time"
"strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/constants"
rbachelper "kubesphere.io/kubesphere/pkg/componenthelper/auth/rbac"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/controller/cluster/predicate"
clusterutils "kubesphere.io/kubesphere/pkg/controller/cluster/utils"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
successSynced = "Synced"
// is synced successfully
messageResourceSynced = "GlobalRole synced successfully"
controllerName = "globalrole-controller"
controllerName = "globalrole"
finalizer = "finalizers.kubesphere.io/globalroles"
)
type Controller struct {
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
globalRoleInformer iamv1alpha2informers.GlobalRoleInformer
globalRoleLister iamv1alpha2listers.GlobalRoleLister
globalRoleSynced cache.InformerSynced
fedGlobalRoleCache cache.Store
fedGlobalRoleCacheController cache.Controller
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
func (r *Reconciler) Name() string {
return controllerName
}
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, globalRoleInformer iamv1alpha2informers.GlobalRoleInformer,
fedGlobalRoleCache cache.Store, fedGlobalRoleCacheController cache.Controller) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
k8sClient: k8sClient,
ksClient: ksClient,
globalRoleInformer: globalRoleInformer,
globalRoleLister: globalRoleInformer.Lister(),
globalRoleSynced: globalRoleInformer.Informer().HasSynced,
fedGlobalRoleCache: fedGlobalRoleCache,
fedGlobalRoleCacheController: fedGlobalRoleCacheController,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRole"),
recorder: recorder,
}
klog.Info("Setting up event handlers")
globalRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueGlobalRole,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueGlobalRole(new)
},
DeleteFunc: ctl.enqueueGlobalRole,
})
return ctl
func (r *Reconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.Info("Starting GlobalRole controller")
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.globalRoleSynced, c.fedGlobalRoleCacheController.HasSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
return nil
type Reconciler struct {
client.Client
logger logr.Logger
recorder record.EventRecorder
helper *rbachelper.Helper
clusterClient clusterclient.Interface
}
func (c *Controller) enqueueGlobalRole(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.clusterClient = mgr.ClusterClient
r.Client = mgr.GetClient()
r.helper = rbachelper.NewHelper(r.Client)
r.logger = mgr.GetLogger().WithName(controllerName)
r.recorder = mgr.GetEventRecorderFor(controllerName)
return builder.
ControllerManagedBy(mgr).
For(&iamv1beta1.GlobalRole{}).
Watches(
&clusterv1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.mapper),
builder.WithPredicates(predicate.ClusterStatusChangedPredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: 2}).
Named(controllerName).
Complete(r)
}
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
func (r *Reconciler) mapper(ctx context.Context, o client.Object) []reconcile.Request {
cluster := o.(*clusterv1alpha1.Cluster)
var requests []reconcile.Request
if !clusterutils.IsClusterReady(cluster) {
return requests
}
globalRoles := &iamv1beta1.GlobalRoleList{}
if err := r.List(ctx, globalRoles); err != nil {
r.logger.Error(err, "failed to list global roles")
return requests
}
for _, globalRole := range globalRoles.Items {
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: globalRole.Name}})
}
return requests
}
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
globalRole := &iamv1beta1.GlobalRole{}
if err := r.Get(ctx, req.NamespacedName, globalRole); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
if globalRole.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !controllerutil.ContainsFinalizer(globalRole, finalizer) {
expected := globalRole.DeepCopy()
controllerutil.AddFinalizer(expected, finalizer)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(globalRole))
}
// Run the reconcile, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.reconcile(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(globalRole, finalizer) {
if err := r.deleteRelatedResources(ctx, globalRole); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete related resources: %s", err)
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(globalRole, finalizer)
if err := r.Update(ctx, globalRole, &client.UpdateOptions{}); err != nil {
return ctrl.Result{}, err
}
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced %s:%s", "key", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
return ctrl.Result{}, nil
}
return true
}
// reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
func (c *Controller) reconcile(key string) error {
globalRole, err := c.globalRoleLister.Get(key)
if err != nil {
// The resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("globalrole '%s' in work queue no longer exists", key))
return nil
if globalRole.AggregationRoleTemplates != nil {
if err := r.helper.AggregationRole(ctx, rbachelper.GlobalRoleRuleOwner{GlobalRole: globalRole}, r.recorder); err != nil {
return ctrl.Result{}, err
}
klog.Error(err)
return err
}
if err = c.multiClusterSync(context.Background(), globalRole); err != nil {
klog.Error(err)
return err
if err := r.multiClusterSync(ctx, globalRole); err != nil {
return ctrl.Result{}, err
}
c.recorder.Event(globalRole, corev1.EventTypeNormal, successSynced, messageResourceSynced)
return nil
return ctrl.Result{}, nil
}
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(ctx context.Context, globalRole *iamv1alpha2.GlobalRole) error {
if err := c.ensureNotControlledByKubefed(ctx, globalRole); err != nil {
klog.Error(err)
return err
}
obj, exist, err := c.fedGlobalRoleCache.GetByKey(globalRole.Name)
if !exist {
return c.createFederatedGlobalRole(ctx, globalRole)
}
func (r *Reconciler) deleteRelatedResources(ctx context.Context, globalRole *iamv1beta1.GlobalRole) error {
clusters, err := r.clusterClient.ListClusters(ctx)
if err != nil {
klog.Error(err)
return err
return fmt.Errorf("failed to list clusters: %s", err)
}
var federatedGlobalRole iamv1alpha2.FederatedRole
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRole); err != nil {
klog.Error(err)
return err
}
if !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Rules, globalRole.Rules) ||
!reflect.DeepEqual(federatedGlobalRole.Spec.Template.Labels, globalRole.Labels) ||
!reflect.DeepEqual(federatedGlobalRole.Spec.Template.Annotations, globalRole.Annotations) {
federatedGlobalRole.Spec.Template.Rules = globalRole.Rules
federatedGlobalRole.Spec.Template.Annotations = globalRole.Annotations
federatedGlobalRole.Spec.Template.Labels = globalRole.Labels
return c.updateFederatedGlobalRole(ctx, &federatedGlobalRole)
}
return nil
}
func (c *Controller) createFederatedGlobalRole(ctx context.Context, globalRole *iamv1alpha2.GlobalRole) error {
federatedGlobalRole := &iamv1alpha2.FederatedRole{
TypeMeta: metav1.TypeMeta{
Kind: iamv1alpha2.FedGlobalRoleKind,
APIVersion: iamv1alpha2.FedGlobalRoleResource.Group + "/" + iamv1alpha2.FedGlobalRoleResource.Version,
},
ObjectMeta: metav1.ObjectMeta{
Name: globalRole.Name,
},
Spec: iamv1alpha2.FederatedRoleSpec{
Template: iamv1alpha2.RoleTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: globalRole.Labels,
Annotations: globalRole.Annotations,
},
Rules: globalRole.Rules,
},
Placement: iamv1alpha2.Placement{
ClusterSelector: iamv1alpha2.ClusterSelector{},
},
},
}
err := controllerutil.SetControllerReference(globalRole, federatedGlobalRole, scheme.Scheme)
if err != nil {
return err
}
data, err := json.Marshal(federatedGlobalRole)
if err != nil {
return err
}
cli := c.k8sClient.(*kubernetes.Clientset)
err = cli.RESTClient().Post().
AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group,
iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name)).
Body(data).
Do(ctx).Error()
if err != nil {
if errors.IsAlreadyExists(err) {
return nil
var notReadyClusters []string
for _, cluster := range clusters {
if clusterutils.IsHostCluster(&cluster) {
continue
}
return err
}
return nil
}
func (c *Controller) updateFederatedGlobalRole(ctx context.Context, federatedGlobalRole *iamv1alpha2.FederatedRole) error {
data, err := json.Marshal(federatedGlobalRole)
if err != nil {
return err
}
cli := c.k8sClient.(*kubernetes.Clientset)
err = cli.RESTClient().Put().
AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group,
iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name,
federatedGlobalRole.Name)).
Body(data).
Do(ctx).Error()
if err != nil {
if errors.IsNotFound(err) {
return nil
// skip if cluster is not ready
if !clusterutils.IsClusterReady(&cluster) {
notReadyClusters = append(notReadyClusters, cluster.Name)
continue
}
return err
}
return nil
}
func (c *Controller) ensureNotControlledByKubefed(ctx context.Context, globalRole *iamv1alpha2.GlobalRole) error {
if globalRole.Labels[constants.KubefedManagedLabel] != "false" {
if globalRole.Labels == nil {
globalRole.Labels = make(map[string]string, 0)
}
globalRole = globalRole.DeepCopy()
globalRole.Labels[constants.KubefedManagedLabel] = "false"
_, err := c.ksClient.IamV1alpha2().GlobalRoles().Update(ctx, globalRole, metav1.UpdateOptions{})
clusterClient, err := r.clusterClient.GetRuntimeClient(cluster.Name)
if err != nil {
klog.Error(err)
return fmt.Errorf("failed to get cluster client: %s", err)
}
if err = clusterClient.Delete(ctx, &iamv1beta1.GlobalRole{ObjectMeta: metav1.ObjectMeta{Name: globalRole.Name}}); err != nil {
if errors.IsNotFound(err) {
continue
}
return err
}
}
if len(notReadyClusters) > 0 {
err = fmt.Errorf("cluster not ready: %s", strings.Join(notReadyClusters, ","))
klog.FromContext(ctx).Error(err, "failed to delete related resources")
r.recorder.Event(globalRole, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", strings.Join(notReadyClusters, ",")))
return err
}
return nil
}
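// multiClusterSync propagates the GlobalRole to every ready member cluster,
// recording a warning event for clusters that are not ready.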
func (r *Reconciler) multiClusterSync(ctx context.Context, globalRole *iamv1beta1.GlobalRole) error {
clusters, err := r.clusterClient.ListClusters(ctx)
if err != nil {
return fmt.Errorf("failed to list clusters: %s", err)
}
var notReadyClusters []string
for _, cluster := range clusters {
// skip if cluster is not ready
if !clusterutils.IsClusterReady(&cluster) {
notReadyClusters = append(notReadyClusters, cluster.Name)
continue
}
if clusterutils.IsHostCluster(&cluster) {
continue
}
if err := r.syncGlobalRole(ctx, cluster, globalRole); err != nil {
return fmt.Errorf("failed to sync global role %s to cluster %s: %s", globalRole.Name, cluster.Name, err)
}
}
if len(notReadyClusters) > 0 {
klog.FromContext(ctx).V(4).Info("cluster not ready", "clusters", strings.Join(notReadyClusters, ","))
r.recorder.Event(globalRole, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", strings.Join(notReadyClusters, ",")))
}
return nil
}
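// syncGlobalRole creates or updates the GlobalRole on a single member
// cluster, copying labels, annotations, rules, and aggregation templates.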
func (r *Reconciler) syncGlobalRole(ctx context.Context, cluster clusterv1alpha1.Cluster, globalRole *iamv1beta1.GlobalRole) error {
if clusterutils.IsHostCluster(&cluster) {
return nil
}
clusterClient, err := r.clusterClient.GetRuntimeClient(cluster.Name)
if err != nil {
return fmt.Errorf("failed to get cluster client: %s", err)
}
target := &iamv1beta1.GlobalRole{ObjectMeta: metav1.ObjectMeta{Name: globalRole.Name}}
op, err := controllerutil.CreateOrUpdate(ctx, clusterClient, target, func() error {
target.Labels = globalRole.Labels
target.Annotations = globalRole.Annotations
target.Rules = globalRole.Rules
target.AggregationRoleTemplates = globalRole.AggregationRoleTemplates
return nil
})
if err != nil {
return fmt.Errorf("failed to update global role: %s", err)
}
r.logger.V(4).Info("global role successfully synced", "cluster", cluster.Name, "operation", op, "name", globalRole.Name)
return nil
}

View File

@@ -1,446 +1,245 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */
package globalrolebinding
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/constants"
devops "kubesphere.io/kubesphere/pkg/simple/client/devops"
"reflect"
"time"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/controller/cluster/predicate"
clusterutils "kubesphere.io/kubesphere/pkg/controller/cluster/utils"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)
const (
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
successSynced = "Synced"
// is synced successfully
messageResourceSynced = "GlobalRoleBinding synced successfully"
controllerName = "globalrolebinding-controller"
controllerName = "globalrolebinding"
finalizer = "finalizers.kubesphere.io/globalrolebindings"
)
type Controller struct {
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
globalRoleBindingLister iamv1alpha2listers.GlobalRoleBindingLister
globalRoleBindingSynced cache.InformerSynced
fedGlobalRoleBindingCache cache.Store
fedGlobalRoleBindingCacheController cache.Controller
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
multiClusterEnabled bool
//nolint:unused
devopsClient devops.Interface
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
type Reconciler struct {
client.Client
logger logr.Logger
recorder record.EventRecorder
clusterClient clusterclient.Interface
}
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer,
fedGlobalRoleBindingCache cache.Store, fedGlobalRoleBindingCacheController cache.Controller,
multiClusterEnabled bool) *Controller {
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
k8sClient: k8sClient,
ksClient: ksClient,
globalRoleBindingLister: globalRoleBindingInformer.Lister(),
globalRoleBindingSynced: globalRoleBindingInformer.Informer().HasSynced,
fedGlobalRoleBindingCache: fedGlobalRoleBindingCache,
fedGlobalRoleBindingCacheController: fedGlobalRoleBindingCacheController,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRoleBinding"),
recorder: recorder,
multiClusterEnabled: multiClusterEnabled,
}
klog.Info("Setting up event handlers")
globalRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueGlobalRoleBinding,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueGlobalRoleBinding(new)
},
DeleteFunc: ctl.enqueueGlobalRoleBinding,
})
	return ctl
}

func (r *Reconciler) Name() string {
return controllerName
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.Info("Starting GlobalRoleBinding controller")
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
synced := make([]cache.InformerSynced, 0)
synced = append(synced, c.globalRoleBindingSynced)
if c.multiClusterEnabled {
synced = append(synced, c.fedGlobalRoleBindingCacheController.HasSynced)
}
if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
	return nil
}

func (r *Reconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
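// Enabled gates this reconciler on the cluster role: IAM objects are authoritative
// on the host cluster only, so member clusters never run this controller. A hedged
// sketch of how a manager might use it (hypothetical wiring, not the actual
// registration code):
//
//	if r.Enabled(clusterRole) {
//		_ = r.SetupWithManager(mgr) // only the host cluster reconciles bindings
//	}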
func (c *Controller) enqueueGlobalRoleBinding(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
	c.workqueue.Add(key)
}

func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.clusterClient = mgr.ClusterClient
r.Client = mgr.GetClient()
r.logger = mgr.GetLogger().WithName(controllerName)
r.recorder = mgr.GetEventRecorderFor(controllerName)
return builder.
ControllerManagedBy(mgr).
For(&iamv1beta1.GlobalRoleBinding{}).
Watches(
&clusterv1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.mapper),
builder.WithPredicates(predicate.ClusterStatusChangedPredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: 2}).
Named(controllerName).
Complete(r)
}
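// The Watches clause above fans every Cluster event out to all GlobalRoleBindings
// via r.mapper (defined below): when a member cluster turns ready, each binding is
// re-enqueued so it gets pushed to the new cluster. A minimal sketch of that
// fan-out (newReadyCluster is a hypothetical test fixture):
//
//	cluster := newReadyCluster("member-1")
//	for _, req := range r.mapper(context.Background(), cluster) {
//		fmt.Println("requeue:", req.Name) // one request per existing binding
//	}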
func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (r *Reconciler) mapper(ctx context.Context, o client.Object) []reconcile.Request {
cluster := o.(*clusterv1alpha1.Cluster)
if !clusterutils.IsClusterReady(cluster) {
return []reconcile.Request{}
}
globalRoleBindings := &iamv1beta1.GlobalRoleBindingList{}
if err := r.List(ctx, globalRoleBindings); err != nil {
r.logger.Error(err, "failed to list global role bindings")
return []reconcile.Request{}
}
var result []reconcile.Request
for _, globalRoleBinding := range globalRoleBindings.Items {
result = append(result, reconcile.Request{NamespacedName: types.NamespacedName{Name: globalRoleBinding.Name}})
}
return result
}
func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the reconcile, passing it the namespace/name string of the
		// GlobalRoleBinding resource to be synced.
		if err := c.reconcile(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced %s", key)
		return nil
	}(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return true
	}
	return true
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	globalRoleBinding := &iamv1beta1.GlobalRoleBinding{}
	if err := r.Get(ctx, req.NamespacedName, globalRoleBinding); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	if globalRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
		// The object is not being deleted, so if it does not have our finalizer,
		// then let's add the finalizer and update the object.
		if !controllerutil.ContainsFinalizer(globalRoleBinding, finalizer) {
			expected := globalRoleBinding.DeepCopy()
			controllerutil.AddFinalizer(expected, finalizer)
			return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(globalRoleBinding))
		}
	} else {
		// The object is being deleted
		if controllerutil.ContainsFinalizer(globalRoleBinding, finalizer) {
			if err := r.deleteRelatedResources(ctx, globalRoleBinding); err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to delete related resources: %s", err)
			}
			// remove our finalizer from the list and update it.
			controllerutil.RemoveFinalizer(globalRoleBinding, finalizer)
			if err := r.Update(ctx, globalRoleBinding, &client.UpdateOptions{}); err != nil {
				return ctrl.Result{}, err
			}
		}
		return ctrl.Result{}, nil
	}
	if err := r.multiClusterSync(ctx, globalRoleBinding); err != nil {
		return ctrl.Result{}, err
	}
	r.recorder.Event(globalRoleBinding, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
	return ctrl.Result{}, nil
}

func (r *Reconciler) deleteRelatedResources(ctx context.Context, globalRoleBinding *iamv1beta1.GlobalRoleBinding) error {
	clusters, err := r.clusterClient.ListClusters(ctx)
	if err != nil {
		return fmt.Errorf("failed to list clusters: %s", err)
	}
	var notReadyClusters []string
	for _, cluster := range clusters {
		if clusterutils.IsHostCluster(&cluster) {
			continue
		}
		// skip if cluster is not ready
		if !clusterutils.IsClusterReady(&cluster) {
			notReadyClusters = append(notReadyClusters, cluster.Name)
			continue
		}
		clusterClient, err := r.clusterClient.GetRuntimeClient(cluster.Name)
		if err != nil {
			return fmt.Errorf("failed to get cluster client: %s", err)
		}
		if err = clusterClient.Delete(ctx, &iamv1beta1.GlobalRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: globalRoleBinding.Name}}); err != nil {
			if errors.IsNotFound(err) {
				continue
			}
			return err
		}
	}
	if len(notReadyClusters) > 0 {
		err = fmt.Errorf("cluster not ready: %s", strings.Join(notReadyClusters, ","))
		klog.FromContext(ctx).Error(err, "failed to delete related resources")
		r.recorder.Event(globalRoleBinding, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", strings.Join(notReadyClusters, ",")))
		return err
	}
	return nil
}

// reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the GlobalRoleBinding
// resource with the current status of the resource.
func (c *Controller) reconcile(key string) error {
	globalRoleBinding, err := c.globalRoleBindingLister.Get(key)
	if err != nil {
		// The resource may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("globalrolebinding '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}
	if globalRoleBinding.RoleRef.Name == iamv1alpha2.PlatformAdmin {
		if err := c.assignClusterAdminRole(globalRoleBinding); err != nil {
			klog.Error(err)
			return err
		}
	}
	if c.multiClusterEnabled {
		if err = c.multiClusterSync(globalRoleBinding); err != nil {
			klog.Error(err)
			return err
		}
	}
	c.recorder.Event(globalRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}
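// The Reconcile flow above is the standard controller-runtime finalizer pattern:
// persist the finalizer before doing real work, clean up on deletion, then release.
// A minimal, self-contained sketch of the same lifecycle (cleanup is a hypothetical
// helper, not part of this controller):
//
//	if obj.GetDeletionTimestamp().IsZero() {
//		if !controllerutil.ContainsFinalizer(obj, finalizer) {
//			controllerutil.AddFinalizer(obj, finalizer)
//			return r.Update(ctx, obj) // persist before any side effects
//		}
//	} else if controllerutil.ContainsFinalizer(obj, finalizer) {
//		if err := cleanup(ctx, obj); err != nil {
//			return err // keep the finalizer so deletion is retried
//		}
//		controllerutil.RemoveFinalizer(obj, finalizer)
//		return r.Update(ctx, obj) // now the API server can finish the delete
//	}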
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
if err := c.ensureNotControlledByKubefed(globalRoleBinding); err != nil {
klog.Error(err)
return err
}
obj, exist, err := c.fedGlobalRoleBindingCache.GetByKey(globalRoleBinding.Name)
if !exist {
return c.createFederatedGlobalRoleBinding(globalRoleBinding)
}
if err != nil {
klog.Error(err)
return err
}
var federatedGlobalRoleBinding iamv1alpha2.FederatedRoleBinding
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRoleBinding)
if err != nil {
klog.Error(err)
return err
}
if !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Subjects, globalRoleBinding.Subjects) ||
!reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.RoleRef, globalRoleBinding.RoleRef) ||
!reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Labels, globalRoleBinding.Labels) ||
!reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Annotations, globalRoleBinding.Annotations) {
federatedGlobalRoleBinding.Spec.Template.Subjects = globalRoleBinding.Subjects
federatedGlobalRoleBinding.Spec.Template.RoleRef = globalRoleBinding.RoleRef
federatedGlobalRoleBinding.Spec.Template.Annotations = globalRoleBinding.Annotations
federatedGlobalRoleBinding.Spec.Template.Labels = globalRoleBinding.Labels
return c.updateFederatedGlobalRoleBinding(&federatedGlobalRoleBinding)
}
return nil
}
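// The reflect.DeepEqual comparison above is a simple drift guard: the federated
// template is only written back when it actually differs, avoiding no-op API calls
// on every resync. The same pattern in isolation (update is a hypothetical helper):
//
//	if !reflect.DeepEqual(observed.Spec.Template, desired.Template) {
//		observed.Spec.Template = desired.Template
//		return update(observed) // write only on real drift
//	}
//	return nil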
func (c *Controller) assignClusterAdminRole(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
	username := findExpectUsername(globalRoleBinding)
	if username == "" {
		return nil
	}
	clusterRoleBinding := &rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin),
		},
		Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects),
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     iamv1alpha2.ResourceKindClusterRole,
			Name:     iamv1alpha2.ClusterAdmin,
		},
	}
	err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, scheme.Scheme)
	if err != nil {
		return err
	}
	_, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(context.Background(), clusterRoleBinding, metav1.CreateOptions{})
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}
	return nil
}

func findExpectUsername(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) string {
	for _, subject := range globalRoleBinding.Subjects {
		if subject.Kind == iamv1alpha2.ResourceKindUser {
			return subject.Name
		}
	}
	return ""
}

func (c *Controller) createFederatedGlobalRoleBinding(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
	federatedGlobalRoleBinding := &iamv1alpha2.FederatedRoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       iamv1alpha2.FedGlobalRoleBindingKind,
			APIVersion: iamv1alpha2.FedGlobalRoleBindingResource.Group + "/" + iamv1alpha2.FedGlobalRoleBindingResource.Version,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: globalRoleBinding.Name,
		},
		Spec: iamv1alpha2.FederatedRoleBindingSpec{
			Template: iamv1alpha2.RoleBindingTemplate{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      globalRoleBinding.Labels,
					Annotations: globalRoleBinding.Annotations,
				},
				Subjects: globalRoleBinding.Subjects,
				RoleRef:  globalRoleBinding.RoleRef,
			},
			Placement: iamv1alpha2.Placement{
				ClusterSelector: iamv1alpha2.ClusterSelector{},
			},
		},
	}
	err := controllerutil.SetControllerReference(globalRoleBinding, federatedGlobalRoleBinding, scheme.Scheme)
	if err != nil {
		return err
	}
	data, err := json.Marshal(federatedGlobalRoleBinding)
	if err != nil {
		return err
	}
	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Post().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group,
			iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name)).
		Body(data).
		Do(context.Background()).Error()
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}
	return nil
}

func (r *Reconciler) assignClusterAdminRole(ctx context.Context, clusterName string, clusterClient client.Client, globalRoleBinding *iamv1beta1.GlobalRoleBinding) error {
	username := globalRoleBinding.Labels[iamv1beta1.UserReferenceLabel]
	if username == "" {
		return nil
	}
	clusterRoleBinding := &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-%s", username, iamv1beta1.ClusterAdmin)}}
	op, err := controllerutil.CreateOrUpdate(ctx, clusterClient, clusterRoleBinding, func() error {
		clusterRoleBinding.Labels = map[string]string{iamv1beta1.RoleReferenceLabel: iamv1beta1.ClusterAdmin, iamv1beta1.UserReferenceLabel: username}
		clusterRoleBinding.Subjects = []rbacv1.Subject{
			{
				Kind:     rbacv1.UserKind,
				APIGroup: rbacv1.GroupName,
				Name:     username,
			},
		}
		clusterRoleBinding.RoleRef = rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     iamv1beta1.ResourceKindClusterRole,
			Name:     iamv1beta1.ClusterAdmin,
		}
		if err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, r.Scheme()); err != nil {
			return fmt.Errorf("failed to set controller reference: %s", err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to update cluster admin role binding %s: %s", clusterRoleBinding.Name, err)
	}
	r.logger.V(4).Info("cluster admin role binding successfully synced", "cluster", clusterName, "operation", op, "name", globalRoleBinding.Name)
	return nil
}
func (c *Controller) updateFederatedGlobalRoleBinding(federatedGlobalRoleBinding *iamv1alpha2.FederatedRoleBinding) error {
	data, err := json.Marshal(federatedGlobalRoleBinding)
	if err != nil {
		return err
	}
	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Put().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group,
			iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name,
			federatedGlobalRoleBinding.Name)).
		Body(data).
		Do(context.Background()).Error()
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
	return nil
}

func (c *Controller) ensureNotControlledByKubefed(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
	if globalRoleBinding.Labels[constants.KubefedManagedLabel] != "false" {
		if globalRoleBinding.Labels == nil {
			globalRoleBinding.Labels = make(map[string]string, 0)
		}
		globalRoleBinding = globalRoleBinding.DeepCopy()
		globalRoleBinding.Labels[constants.KubefedManagedLabel] = "false"
		_, err := c.ksClient.IamV1alpha2().GlobalRoleBindings().Update(context.Background(), globalRoleBinding, metav1.UpdateOptions{})
		if err != nil {
			klog.Error(err)
		}
	}
	return nil
}

func ensureSubjectAPIVersionIsValid(subjects []rbacv1.Subject) []rbacv1.Subject {
	validSubjects := make([]rbacv1.Subject, 0)
	for _, subject := range subjects {
		if subject.Kind == iamv1alpha2.ResourceKindUser {
			validSubject := rbacv1.Subject{
				Kind:     iamv1alpha2.ResourceKindUser,
				APIGroup: "rbac.authorization.k8s.io",
				Name:     subject.Name,
			}
			validSubjects = append(validSubjects, validSubject)
		}
	}
	return validSubjects
}

func (r *Reconciler) multiClusterSync(ctx context.Context, globalRoleBinding *iamv1beta1.GlobalRoleBinding) error {
	clusters, err := r.clusterClient.ListClusters(ctx)
	if err != nil {
		return fmt.Errorf("failed to list clusters: %s", err)
	}
	var notReadyClusters []string
	for _, cluster := range clusters {
		// skip if cluster is not ready
		if !clusterutils.IsClusterReady(&cluster) {
			notReadyClusters = append(notReadyClusters, cluster.Name)
			continue
		}
		if err := r.syncGlobalRoleBinding(ctx, &cluster, globalRoleBinding); err != nil {
			return fmt.Errorf("failed to sync global role binding %s to cluster %s: %s", globalRoleBinding.Name, cluster.Name, err)
		}
	}
	if len(notReadyClusters) > 0 {
		klog.FromContext(ctx).V(4).Info("cluster not ready", "clusters", strings.Join(notReadyClusters, ","))
		r.recorder.Event(globalRoleBinding, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", strings.Join(notReadyClusters, ",")))
	}
	return nil
}

func (r *Reconciler) syncGlobalRoleBinding(ctx context.Context, cluster *clusterv1alpha1.Cluster, globalRoleBinding *iamv1beta1.GlobalRoleBinding) error {
	clusterClient, err := r.clusterClient.GetRuntimeClient(cluster.Name)
	if err != nil {
		return fmt.Errorf("failed to get cluster client: %s", err)
	}
	target := &iamv1beta1.GlobalRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: globalRoleBinding.Name}}
	op, err := controllerutil.CreateOrUpdate(ctx, clusterClient, target, func() error {
		target.Labels = globalRoleBinding.Labels
		target.Annotations = globalRoleBinding.Annotations
		target.RoleRef = globalRoleBinding.RoleRef
		target.Subjects = globalRoleBinding.Subjects
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to update global role binding: %s", err)
	}
	if globalRoleBinding.RoleRef.Name == iamv1beta1.PlatformAdmin {
		if err := r.assignClusterAdminRole(ctx, cluster.Name, clusterClient, target); err != nil {
			return fmt.Errorf("failed to assign cluster admin: %s", err)
		}
	}
	r.logger.V(4).Info("global role binding successfully synced", "cluster", cluster.Name, "operation", op, "name", globalRoleBinding.Name)
	return nil
}
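// controllerutil.CreateOrUpdate, used in both sync helpers above, gets the object,
// applies the mutate callback, and then issues a Create or an Update only if the
// object changed, reporting which operation ran. A minimal usage sketch on a
// hypothetical ConfigMap (not part of this controller):
//
//	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
//	op, err := controllerutil.CreateOrUpdate(ctx, cl, cm, func() error {
//		cm.Data = map[string]string{"key": "value"} // desired state lives in the callback
//		return nil
//	})
//	if err == nil {
//		fmt.Println(op) // "created", "updated", or "unchanged"
//	}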


@@ -1,344 +1,134 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */
package group
import (
"context"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/util/validation"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
iam1alpha2 "kubesphere.io/api/iam/v1alpha2"
tenantv1alpha2 "kubesphere.io/api/tenant/v1alpha2"
fedv1beta1types "kubesphere.io/api/types/v1beta1"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
fedv1beta1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/types/v1beta1"
iamv1alpha1listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
fedv1beta1lister "kubesphere.io/kubesphere/pkg/client/listers/types/v1beta1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/utils/controller"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
"kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
const (
	successSynced         = "Synced"
	messageResourceSynced = "Group synced successfully"
	controllerName        = "group"
	finalizer             = "finalizers.kubesphere.io/groups"
)
type Controller struct {
controller.BaseController
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
groupInformer iamv1alpha2informers.GroupInformer
groupLister iamv1alpha1listers.GroupLister
recorder record.EventRecorder
federatedGroupLister fedv1beta1lister.FederatedGroupLister
	multiClusterEnabled bool
}

type Reconciler struct {
client.Client
recorder record.EventRecorder
}
// NewController creates Group Controller instance
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, groupInformer iamv1alpha2informers.GroupInformer,
federatedGroupInformer fedv1beta1informers.FederatedGroupInformer,
multiClusterEnabled bool) *Controller {
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
ctl := &Controller{
BaseController: controller.BaseController{
Workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Group"),
Synced: []cache.InformerSynced{groupInformer.Informer().HasSynced},
Name: controllerName,
},
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}),
k8sClient: k8sClient,
ksClient: ksClient,
groupInformer: groupInformer,
groupLister: groupInformer.Lister(),
multiClusterEnabled: multiClusterEnabled,
}
if ctl.multiClusterEnabled {
ctl.federatedGroupLister = federatedGroupInformer.Lister()
ctl.Synced = append(ctl.Synced, federatedGroupInformer.Informer().HasSynced)
}
ctl.Handler = ctl.reconcile
klog.Info("Setting up event handlers")
groupInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.Enqueue,
UpdateFunc: func(old, new interface{}) {
ctl.Enqueue(new)
},
DeleteFunc: ctl.Enqueue,
})
	return ctl
}

func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(
&iamv1beta1.Group{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
Named(controllerName).
Complete(r)
}
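// ResourceVersionChangedPredicate above suppresses update events whose
// resourceVersion is unchanged (informer resyncs), so the reconciler only wakes up
// for real writes. An illustrative alternative from the same package, shown here
// only as a sketch, reacts to spec changes alone (metadata and status updates are
// ignored because they do not bump the generation):
//
//	builder.WithPredicates(predicate.GenerationChangedPredicate{})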
func (c *Controller) Start(ctx context.Context) error {
return c.Run(1, ctx.Done())
}
// reconcile handles Group informer events, and cleans up related resources when a group is being deleted.
func (c *Controller) reconcile(key string) error {
	group, err := c.groupLister.Get(key)
	if err != nil {
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("group '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}
	if group.ObjectMeta.DeletionTimestamp.IsZero() {
		var g *iam1alpha2.Group
		if !sliceutil.HasString(group.Finalizers, finalizer) {
			g = group.DeepCopy()
			g.ObjectMeta.Finalizers = append(g.ObjectMeta.Finalizers, finalizer)
		}
		if c.multiClusterEnabled {
			// Ensure not controlled by Kubefed
			if group.Labels == nil || group.Labels[constants.KubefedManagedLabel] != "false" {
				if g == nil {
					g = group.DeepCopy()
				}
				if g.Labels == nil {
					g.Labels = make(map[string]string, 0)
				}
				g.Labels[constants.KubefedManagedLabel] = "false"
			}
		}
		// Set OwnerReferences when the group has a parent or Workspace. And it's not owned by kubefed
		if group.Labels != nil && group.Labels[constants.KubefedManagedLabel] != "true" {
			if parent, ok := group.Labels[iam1alpha2.GroupParent]; ok {
				// If the Group is owned by a Parent
				if !k8sutil.IsControlledBy(group.OwnerReferences, "Group", parent) {
					if g == nil {
						g = group.DeepCopy()
					}
					groupParent, err := c.groupLister.Get(parent)
					if err != nil {
						if errors.IsNotFound(err) {
							utilruntime.HandleError(fmt.Errorf("parent group '%s' no longer exists", key))
							delete(g.Labels, iam1alpha2.GroupParent)
						} else {
							klog.Error(err)
							return err
						}
					} else {
						if err := controllerutil.SetControllerReference(groupParent, g, scheme.Scheme); err != nil {
							klog.Error(err)
							return err
						}
					}
				}
			} else if ws, ok := group.Labels[constants.WorkspaceLabelKey]; ok {
				// If the Group is owned by a Workspace
				if !k8sutil.IsControlledBy(group.OwnerReferences, tenantv1alpha2.ResourceKindWorkspaceTemplate, ws) {
					workspace, err := c.ksClient.TenantV1alpha2().WorkspaceTemplates().Get(context.Background(), ws, metav1.GetOptions{})
					if err != nil {
						if errors.IsNotFound(err) {
							utilruntime.HandleError(fmt.Errorf("workspace '%s' no longer exists", ws))
						} else {
							klog.Error(err)
							return err
						}
					} else {
						if g == nil {
							g = group.DeepCopy()
						}
						g.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(g.OwnerReferences)
						if err := controllerutil.SetControllerReference(workspace, g, scheme.Scheme); err != nil {
							return err
						}
					}
				}
			}
		}
		if g != nil {
			if _, err = c.ksClient.IamV1alpha2().Groups().Update(context.Background(), g, metav1.UpdateOptions{}); err != nil {
				return err
			}
			// Skip reconcile when group is updated.
			return nil
		}
	} else {
		// The object is being deleted
		if sliceutil.HasString(group.ObjectMeta.Finalizers, finalizer) {
			if err = c.deleteGroupBindings(group); err != nil {
				klog.Error(err)
				return err
			}
			if err = c.deleteRoleBindings(group); err != nil {
				klog.Error(err)
				return err
			}
			group.Finalizers = sliceutil.RemoveString(group.ObjectMeta.Finalizers, func(item string) bool {
				return item == finalizer
			})
			if _, err = c.ksClient.IamV1alpha2().Groups().Update(context.Background(), group, metav1.UpdateOptions{}); err != nil {
				return err
			}
		}
		return nil
	}
	// synchronization through kubefed-controller when multi cluster is enabled
	if c.multiClusterEnabled {
		if err = c.multiClusterSync(group); err != nil {
			klog.Error(err)
			return err
		}
	}
	c.recorder.Event(group, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	group := &iamv1beta1.Group{}
	if err := r.Get(ctx, req.NamespacedName, group); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	if group.ObjectMeta.DeletionTimestamp.IsZero() {
		var g *iamv1beta1.Group
		if !sliceutil.HasString(group.Finalizers, finalizer) {
			g = group.DeepCopy()
			g.ObjectMeta.Finalizers = append(g.ObjectMeta.Finalizers, finalizer)
		}
		if g != nil {
			return ctrl.Result{}, r.Update(ctx, g)
		}
	} else {
		// The object is being deleted
		if sliceutil.HasString(group.ObjectMeta.Finalizers, finalizer) {
			if err := r.deleteGroupBindings(ctx, group); err != nil {
				return ctrl.Result{}, err
			}
			if err := r.deleteRoleBindings(ctx, group); err != nil {
				return ctrl.Result{}, err
			}
			group.Finalizers = sliceutil.RemoveString(group.ObjectMeta.Finalizers, func(item string) bool {
				return item == finalizer
			})
			return ctrl.Result{}, r.Update(ctx, group)
		}
		return ctrl.Result{}, nil
	}
	// TODO: sync logic needs to be updated and no longer relies on KubeFed, it needs to be synchronized manually.
	r.recorder.Event(group, corev1.EventTypeNormal, controller.Synced, controller.MessageResourceSynced)
	return ctrl.Result{}, nil
}
func (c *Controller) deleteGroupBindings(group *iam1alpha2.Group) error {
	if len(group.Name) > validation.LabelValueMaxLength {
		// ignore invalid label value error
		return nil
	}
	// Groupbindings that created by kubesphere will be deleted directly.
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromValidatedSet(labels.Set{iam1alpha2.GroupReferenceLabel: group.Name}).String(),
	}
	if err := c.ksClient.IamV1alpha2().GroupBindings().
		DeleteCollection(context.Background(), *metav1.NewDeleteOptions(0), listOptions); err != nil {
		klog.Error(err)
		return err
	}
	return nil
}

func (r *Reconciler) deleteGroupBindings(ctx context.Context, group *iamv1beta1.Group) error {
	if len(group.Name) > validation.LabelValueMaxLength {
		// ignore invalid label value error
		return nil
	}
	// Group bindings that created by kubesphere will be deleted directly.
	return r.DeleteAllOf(ctx, &iamv1beta1.GroupBinding{}, client.GracePeriodSeconds(0), client.MatchingLabelsSelector{
		Selector: labels.SelectorFromValidatedSet(labels.Set{iamv1beta1.GroupReferenceLabel: group.Name}),
	})
}
// remove all RoleBindings.
func (c *Controller) deleteRoleBindings(group *iam1alpha2.Group) error {
	if len(group.Name) > validation.LabelValueMaxLength {
		// ignore invalid label value error
		return nil
	}
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromValidatedSet(labels.Set{iam1alpha2.GroupReferenceLabel: group.Name}).String(),
	}
	deleteOptions := *metav1.NewDeleteOptions(0)
	if err := c.ksClient.IamV1alpha2().WorkspaceRoleBindings().
		DeleteCollection(context.Background(), deleteOptions, listOptions); err != nil {
		klog.Error(err)
		return err
	}
	if err := c.k8sClient.RbacV1().ClusterRoleBindings().
		DeleteCollection(context.Background(), deleteOptions, listOptions); err != nil {
		klog.Error(err)
		return err
	}
	if result, err := c.k8sClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}); err != nil {
		klog.Error(err)
		return err
	} else {
		for _, namespace := range result.Items {
			if err = c.k8sClient.RbacV1().RoleBindings(namespace.Name).DeleteCollection(context.Background(), deleteOptions, listOptions); err != nil {
				klog.Error(err)
				return err
			}
		}
	}
	return nil
}

func (r *Reconciler) deleteRoleBindings(ctx context.Context, group *iamv1beta1.Group) error {
	if len(group.Name) > validation.LabelValueMaxLength {
		// ignore invalid label value error
		return nil
	}
	selector := labels.SelectorFromValidatedSet(labels.Set{iamv1beta1.GroupReferenceLabel: group.Name})
	deleteOption := client.GracePeriodSeconds(0)
	if err := r.DeleteAllOf(ctx, &iamv1beta1.WorkspaceRoleBinding{}, deleteOption, client.MatchingLabelsSelector{Selector: selector}); err != nil {
		return err
	}
	if err := r.DeleteAllOf(ctx, &rbacv1.ClusterRoleBinding{}, deleteOption, client.MatchingLabelsSelector{Selector: selector}); err != nil {
		return err
	}
	namespaces := &corev1.NamespaceList{}
	if err := r.List(ctx, namespaces); err != nil {
		return err
	}
	for _, namespace := range namespaces.Items {
		if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, deleteOption, client.MatchingLabelsSelector{Selector: selector}, client.InNamespace(namespace.Name)); err != nil {
			return err
		}
	}
	return nil
}

func (c *Controller) multiClusterSync(group *iam1alpha2.Group) error {
	obj, err := c.federatedGroupLister.Get(group.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			return c.createFederatedGroup(group)
		}
		klog.Error(err)
		return err
	}
	if !reflect.DeepEqual(obj.Spec.Template.Labels, group.Labels) {
		obj.Spec.Template.Labels = group.Labels
		if _, err = c.ksClient.TypesV1beta1().FederatedGroups().Update(context.Background(), obj, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}
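// r.DeleteAllOf in deleteRoleBindings above replaces the typed DeleteCollection
// calls of the old controller: one request per resource type, filtered by the
// group-reference label. A minimal usage sketch (hypothetical label set, assuming
// a controller-runtime client cl):
//
//	selector := labels.SelectorFromValidatedSet(labels.Set{"iam.kubesphere.io/group-ref": "demo"})
//	err := cl.DeleteAllOf(ctx, &rbacv1.RoleBinding{},
//		client.InNamespace("default"),
//		client.GracePeriodSeconds(0),
//		client.MatchingLabelsSelector{Selector: selector})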
func (c *Controller) createFederatedGroup(group *iam1alpha2.Group) error {
federatedGroup := &fedv1beta1types.FederatedGroup{
ObjectMeta: metav1.ObjectMeta{
Name: group.Name,
},
Spec: fedv1beta1types.FederatedGroupSpec{
Template: fedv1beta1types.GroupTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: group.Labels,
},
Spec: group.Spec,
},
Placement: fedv1beta1types.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
},
}
// must bind group lifecycle
err := controllerutil.SetControllerReference(group, federatedGroup, scheme.Scheme)
if err != nil {
return err
}
if _, err = c.ksClient.TypesV1beta1().FederatedGroups().Create(context.Background(), federatedGroup, metav1.CreateOptions{}); err != nil {
return err
}
return nil
}
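// SetControllerReference above ties the FederatedGroup's lifecycle to its Group:
// once the Group is deleted, the garbage collector removes the federated object,
// so the controller never has to clean it up explicitly. The idiom in isolation
// (assuming a populated runtime scheme s):
//
//	child := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "child", Namespace: "ns"}}
//	if err := controllerutil.SetControllerReference(owner, child, s); err != nil {
//		return err // errors if child is already controlled by another owner
//	}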


@@ -1,454 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package group
import (
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
v1alpha2 "kubesphere.io/api/iam/v1alpha2"
tenantv1alpha2 "kubesphere.io/api/tenant/v1alpha2"
fedv1beta1types "kubesphere.io/api/types/v1beta1"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
)
var (
noResyncPeriodFunc = func() time.Duration { return 0 }
)
func init() {
v1alpha2.AddToScheme(scheme.Scheme)
tenantv1alpha2.AddToScheme(scheme.Scheme)
}
type fixture struct {
t *testing.T
ksclient *fake.Clientset
k8sclient *k8sfake.Clientset
// Objects to put in the store.
groupLister []*v1alpha2.Group
fedgroupLister []*fedv1beta1types.FederatedGroup
// Actions expected to happen on the client.
kubeactions []core.Action
actions []core.Action
// Objects from here preloaded into NewSimpleFake.
kubeobjects []runtime.Object
objects []runtime.Object
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
f.kubeobjects = []runtime.Object{}
return f
}
func newGroup(name string) *v1alpha2.Group {
return &v1alpha2.Group{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String(), Kind: "Group"},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1alpha2.GroupSpec{},
}
}
func newUnmanagedGroup(name string) *v1alpha2.Group {
return &v1alpha2.Group{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String(), Kind: "Group"},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{constants.KubefedManagedLabel: "false"},
Finalizers: []string{"finalizers.kubesphere.io/groups"},
},
Spec: v1alpha2.GroupSpec{},
}
}
func newFederatedGroup(group *v1alpha2.Group) *fedv1beta1types.FederatedGroup {
return &fedv1beta1types.FederatedGroup{
ObjectMeta: metav1.ObjectMeta{
Name: group.Name,
},
Spec: fedv1beta1types.FederatedGroupSpec{
Template: fedv1beta1types.GroupTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: group.Labels,
},
Spec: group.Spec,
},
Placement: fedv1beta1types.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
},
}
}
func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
f.ksclient = fake.NewSimpleClientset(f.objects...)
f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
ksinformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc())
k8sinformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc())
for _, group := range f.groupLister {
err := ksinformers.Iam().V1alpha2().Groups().Informer().GetIndexer().Add(group)
if err != nil {
f.t.Errorf("add group:%s", err)
}
}
for _, group := range f.fedgroupLister {
err := ksinformers.Types().V1beta1().FederatedGroups().Informer().GetIndexer().Add(group)
if err != nil {
f.t.Errorf("add federated group:%s", err)
}
}
c := NewController(f.k8sclient, f.ksclient,
ksinformers.Iam().V1alpha2().Groups(),
ksinformers.Types().V1beta1().FederatedGroups(), true)
c.recorder = &record.FakeRecorder{}
return c, ksinformers, k8sinformers
}
func (f *fixture) run(userName string) {
f.runController(userName, true, false)
}
//nolint:unused
func (f *fixture) runExpectError(userName string) {
f.runController(userName, true, true)
}
func (f *fixture) runController(group string, startInformers bool, expectError bool) {
c, i, k8sI := f.newController()
if startInformers {
stopCh := make(chan struct{})
defer close(stopCh)
i.Start(stopCh)
k8sI.Start(stopCh)
}
err := c.Handler(group)
if !expectError && err != nil {
f.t.Errorf("error syncing group: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing group, got nil")
}
actions := filterInformerActions(f.ksclient.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
k8sActions := filterInformerActions(f.k8sclient.Actions())
for i, action := range k8sActions {
if len(f.kubeactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
break
}
expectedAction := f.kubeactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.kubeactions) > len(k8sActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
}
}
// checkAction verifies that expected and actual actions are equal and both have
// same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
return
}
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
return
}
switch a := actual.(type) {
case core.CreateActionImpl:
e, _ := expected.(core.CreateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.UpdateActionImpl:
e, _ := expected.(core.UpdateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.PatchActionImpl:
e, _ := expected.(core.PatchActionImpl)
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
}
case core.DeleteCollectionActionImpl:
e, _ := expected.(core.DeleteCollectionActionImpl)
exp := e.GetListRestrictions()
target := a.GetListRestrictions()
if !reflect.DeepEqual(exp, target) {
t.Errorf("Action %s %s has wrong Query\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(exp, target))
}
default:
t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
actual.GetVerb(), actual.GetResource().Resource)
}
}
// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state we can filter it to lower
// noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
var ret []core.Action
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "groups") ||
action.Matches("watch", "groups") ||
action.Matches("list", "groups") ||
action.Matches("list", "namespaces") ||
action.Matches("get", "workspacetemplates") ||
action.Matches("list", "federatedgroups") ||
action.Matches("watch", "federatedgroups")) {
continue
}
ret = append(ret, action)
}
return ret
}
func (f *fixture) expectUpdateGroupsFinalizerAction(group *v1alpha2.Group) {
expect := group.DeepCopy()
if expect.Labels == nil {
expect.Labels = make(map[string]string, 0)
}
expect.Finalizers = []string{"finalizers.kubesphere.io/groups"}
expect.Labels[constants.KubefedManagedLabel] = "false"
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect)
f.actions = append(f.actions, action)
}
func (f *fixture) expectUpdateWorkspaceRefAction(child *v1alpha2.Group, wsp *tenantv1alpha2.WorkspaceTemplate) {
expect := child.DeepCopy()
if expect.Labels == nil {
expect.Labels = make(map[string]string, 0)
}
controllerutil.SetControllerReference(wsp, expect, scheme.Scheme)
expect.Finalizers = []string{"finalizers.kubesphere.io/groups"}
expect.Labels[constants.KubefedManagedLabel] = "false"
updateAction := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect)
f.actions = append(f.actions, updateAction)
}
func (f *fixture) expectUpdateParentsRefAction(parent, child *v1alpha2.Group) {
expect := child.DeepCopy()
if expect.Labels == nil {
expect.Labels = make(map[string]string, 0)
}
controllerutil.SetControllerReference(parent, expect, scheme.Scheme)
expect.Finalizers = []string{"finalizers.kubesphere.io/groups"}
expect.Labels[constants.KubefedManagedLabel] = "false"
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect)
f.actions = append(f.actions, action)
}
func (f *fixture) expectCreateFederatedGroupsAction(group *v1alpha2.Group) {
federatedGroup := newFederatedGroup(group)
controllerutil.SetControllerReference(group, federatedGroup, scheme.Scheme)
actionCreate := core.NewCreateAction(schema.GroupVersionResource{Resource: "federatedgroups"}, "", federatedGroup)
f.actions = append(f.actions, actionCreate)
}
func (f *fixture) expectUpdateFederatedGroupsAction(group *v1alpha2.Group) {
g := newFederatedGroup(group)
controllerutil.SetControllerReference(group, g, scheme.Scheme)
actionCreate := core.NewUpdateAction(schema.GroupVersionResource{Group: "types.kubefed.io", Version: "v1beta1", Resource: "federatedgroups"}, "", g)
f.actions = append(f.actions, actionCreate)
}
func (f *fixture) expectUpdateGroupsDeleteAction(group *v1alpha2.Group) {
expect := group.DeepCopy()
expect.Finalizers = []string{}
listOptions := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{v1alpha2.GroupReferenceLabel: group.Name}).String(),
}
actionDelete := core.NewDeleteCollectionAction(schema.GroupVersionResource{Resource: "groupbindings"}, "", listOptions)
f.actions = append(f.actions, actionDelete)
actionDelete = core.NewDeleteCollectionAction(schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}, "", listOptions)
f.kubeactions = append(f.kubeactions, actionDelete)
actionDelete = core.NewDeleteCollectionAction(schema.GroupVersionResource{Resource: "workspacerolebindings"}, "", listOptions)
f.actions = append(f.actions, actionDelete)
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect)
f.actions = append(f.actions, action)
}
func getKey(group *v1alpha2.Group, t *testing.T) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(group)
if err != nil {
t.Errorf("Unexpected error getting key for group %v: %v", group.Name, err)
return ""
}
return key
}
func TestDeletesGroup(t *testing.T) {
f := newFixture(t)
deletedGroup := newUnmanagedGroup("test")
now := metav1.Now()
deletedGroup.ObjectMeta.DeletionTimestamp = &now
f.groupLister = append(f.groupLister, deletedGroup)
f.objects = append(f.objects, deletedGroup)
f.expectUpdateGroupsDeleteAction(deletedGroup)
f.run(getKey(deletedGroup, t))
}
func TestDoNothing(t *testing.T) {
f := newFixture(t)
group := newGroup("test")
f.groupLister = append(f.groupLister, group)
f.objects = append(f.objects, group)
f.expectUpdateGroupsFinalizerAction(group)
f.run(getKey(group, t))
}
func TestGroupCreateWithParent(t *testing.T) {
f := newFixture(t)
parent := newGroup("parent")
child := newGroup("child")
child.Labels = map[string]string{v1alpha2.GroupParent: "parent"}
f.groupLister = append(f.groupLister, parent, child)
f.objects = append(f.objects, parent, child)
f.expectUpdateParentsRefAction(parent, child)
f.run(getKey(child, t))
}
func TestGroupCreateWithWorkspace(t *testing.T) {
f := newFixture(t)
child := newGroup("child")
child.Labels = map[string]string{constants.WorkspaceLabelKey: "wsp"}
f.groupLister = append(f.groupLister, child)
f.objects = append(f.objects, child)
wsp := tenantv1alpha2.WorkspaceTemplate{
TypeMeta: metav1.TypeMeta{APIVersion: tenantv1alpha2.SchemeGroupVersion.String(), Kind: tenantv1alpha2.ResourceKindWorkspaceTemplate},
ObjectMeta: metav1.ObjectMeta{
Name: "wsp",
},
}
f.objects = append(f.objects, &wsp)
f.expectUpdateWorkspaceRefAction(child, &wsp)
f.run(getKey(child, t))
}
func TestFederatedGroupCreate(t *testing.T) {
f := newFixture(t)
group := newUnmanagedGroup("test")
f.groupLister = append(f.groupLister, group)
f.objects = append(f.objects, group)
f.expectCreateFederatedGroupsAction(group)
f.run(getKey(group, t))
}
func TestFederatedGroupUpdate(t *testing.T) {
f := newFixture(t)
group := newUnmanagedGroup("test")
federatedGroup := newFederatedGroup(group.DeepCopy())
controllerutil.SetControllerReference(group, federatedGroup, scheme.Scheme)
f.fedgroupLister = append(f.fedgroupLister, federatedGroup)
f.objects = append(f.objects, federatedGroup)
group.Labels["foo"] = "bar"
f.groupLister = append(f.groupLister, group)
f.objects = append(f.objects, group)
f.expectUpdateFederatedGroupsAction(group.DeepCopy())
f.run(getKey(group, t))
}


@@ -1,189 +1,103 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */
package groupbinding
import (
"context"
"fmt"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/predicate"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
fedv1beta1types "kubesphere.io/api/types/v1beta1"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
fedv1beta1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/types/v1beta1"
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
fedv1beta1lister "kubesphere.io/kubesphere/pkg/client/listers/types/v1beta1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/utils/controller"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
const (
	successSynced         = "Synced"
	messageResourceSynced = "GroupBinding synced successfully"
	controllerName        = "groupbinding"
	finalizer             = "finalizers.kubesphere.io/groupsbindings"
)
type Controller struct {
controller.BaseController
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
groupBindingLister iamv1alpha2listers.GroupBindingLister
recorder record.EventRecorder
federatedGroupBindingLister fedv1beta1lister.FederatedGroupBindingLister
	multiClusterEnabled bool
}

type Reconciler struct {
client.Client
recorder record.EventRecorder
}
// NewController creates GroupBinding Controller instance
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
groupBindingInformer iamv1alpha2informers.GroupBindingInformer,
federatedGroupBindingInformer fedv1beta1informers.FederatedGroupBindingInformer, multiClusterEnabled bool) *Controller {
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
BaseController: controller.BaseController{
Workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GroupBinding"),
Synced: []cache.InformerSynced{groupBindingInformer.Informer().HasSynced},
Name: controllerName,
},
k8sClient: k8sClient,
ksClient: ksClient,
groupBindingLister: groupBindingInformer.Lister(),
multiClusterEnabled: multiClusterEnabled,
recorder: recorder,
}
ctl.Handler = ctl.reconcile
if ctl.multiClusterEnabled {
ctl.federatedGroupBindingLister = federatedGroupBindingInformer.Lister()
ctl.Synced = append(ctl.Synced, federatedGroupBindingInformer.Informer().HasSynced)
}
klog.Info("Setting up event handlers")
groupBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.Enqueue,
UpdateFunc: func(old, new interface{}) {
ctl.Enqueue(new)
},
DeleteFunc: ctl.Enqueue,
})
	return ctl
}

func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(
&iamv1beta1.GroupBinding{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
WithOptions(controller.Options{
MaxConcurrentReconciles: 2,
}).
Named(controllerName).
Complete(r)
}
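// MaxConcurrentReconciles: 2 above lets two GroupBinding keys reconcile in
// parallel; the underlying workqueue still guarantees the same key is never
// handled by two workers at once, so per-object ordering is preserved. Raising
// the value (illustrative sketch only, tuning is workload-dependent) mainly helps
// when reconciles are I/O bound:
//
//	WithOptions(controller.Options{MaxConcurrentReconciles: 4}).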
// reconcile handles GroupBinding informer events, it updates user's Groups property with the current GroupBinding.
func (c *Controller) reconcile(key string) error {
	groupBinding, err := c.groupBindingLister.Get(key)
	if err != nil {
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("groupbinding '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}
	if groupBinding.ObjectMeta.DeletionTimestamp.IsZero() {
		var g *iamv1alpha2.GroupBinding
		if !sliceutil.HasString(groupBinding.Finalizers, finalizer) {
			g = groupBinding.DeepCopy()
			g.ObjectMeta.Finalizers = append(g.ObjectMeta.Finalizers, finalizer)
		}
		if c.multiClusterEnabled {
			// Ensure not controlled by Kubefed
			if groupBinding.Labels == nil || groupBinding.Labels[constants.KubefedManagedLabel] != "false" {
				if g == nil {
					g = groupBinding.DeepCopy()
				}
				if g.Labels == nil {
					g.Labels = make(map[string]string, 0)
				}
				g.Labels[constants.KubefedManagedLabel] = "false"
			}
		}
		if g != nil {
			if _, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), g, metav1.UpdateOptions{}); err != nil {
				return err
			}
			// Skip reconcile when group is updated.
			return nil
		}
	} else {
		// The object is being deleted
		if sliceutil.HasString(groupBinding.ObjectMeta.Finalizers, finalizer) {
			if err = c.unbindUser(groupBinding); err != nil {
				klog.Error(err)
				return err
			}
			groupBinding.Finalizers = sliceutil.RemoveString(groupBinding.ObjectMeta.Finalizers, func(item string) bool {
				return item == finalizer
			})
			if _, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), groupBinding, metav1.UpdateOptions{}); err != nil {
				return err
			}
		}
		return nil
	}
	if err = c.bindUser(groupBinding); err != nil {
		klog.Error(err)
		return err
	}
	// synchronization through kubefed-controller when multi cluster is enabled
	if c.multiClusterEnabled {
		if err = c.multiClusterSync(groupBinding); err != nil {
			klog.Error(err)
			return err
		}
	}
	c.recorder.Event(groupBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	groupBinding := &iamv1beta1.GroupBinding{}
	if err := r.Get(ctx, req.NamespacedName, groupBinding); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	if groupBinding.ObjectMeta.DeletionTimestamp.IsZero() {
		var g *iamv1beta1.GroupBinding
		if !sliceutil.HasString(groupBinding.Finalizers, finalizer) {
			g = groupBinding.DeepCopy()
			g.ObjectMeta.Finalizers = append(g.ObjectMeta.Finalizers, finalizer)
		}
		if g != nil {
			return ctrl.Result{}, r.Update(ctx, g)
		}
	} else {
		// The object is being deleted
		if sliceutil.HasString(groupBinding.ObjectMeta.Finalizers, finalizer) {
			if err := r.unbindUser(ctx, groupBinding); err != nil {
				return ctrl.Result{}, err
			}
			groupBinding.Finalizers = sliceutil.RemoveString(groupBinding.ObjectMeta.Finalizers, func(item string) bool {
				return item == finalizer
			})
			return ctrl.Result{}, r.Update(ctx, groupBinding)
		}
		return ctrl.Result{}, nil
	}
	if err := r.bindUser(ctx, groupBinding); err != nil {
		return ctrl.Result{}, err
	}
	// TODO: sync logic needs to be updated and no longer relies on KubeFed, it needs to be synchronized manually.
	r.recorder.Event(groupBinding, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
	return ctrl.Result{}, nil
}
func (c *Controller) Start(ctx context.Context) error {
return c.Run(2, ctx.Done())
}
func (c *Controller) unbindUser(groupBinding *iamv1alpha2.GroupBinding) error {
return c.updateUserGroups(groupBinding, func(groups []string, group string) (bool, []string) {
func (r *Reconciler) unbindUser(ctx context.Context, groupBinding *iamv1beta1.GroupBinding) error {
return r.updateUserGroups(ctx, groupBinding, func(groups []string, group string) (bool, []string) {
// remove a group from the groups
if sliceutil.HasString(groups, group) {
groups := sliceutil.RemoveString(groups, func(item string) bool {
@@ -195,9 +109,9 @@ func (c *Controller) unbindUser(groupBinding *iamv1alpha2.GroupBinding) error {
})
}
func (c *Controller) bindUser(groupBinding *iamv1alpha2.GroupBinding) error {
return c.updateUserGroups(groupBinding, func(groups []string, group string) (bool, []string) {
// add group to the groups
func (r *Reconciler) bindUser(ctx context.Context, groupBinding *iamv1beta1.GroupBinding) error {
return r.updateUserGroups(ctx, groupBinding, func(groups []string, group string) (bool, []string) {
// add a group to the groups
if !sliceutil.HasString(groups, group) {
groups := append(groups, group)
return true, groups
@@ -206,104 +120,40 @@ func (c *Controller) bindUser(groupBinding *iamv1alpha2.GroupBinding) error {
})
}
// Udpate user's Group property. So no need to query user's groups when authorizing.
func (c *Controller) updateUserGroups(groupBinding *iamv1alpha2.GroupBinding, operator func(groups []string, group string) (bool, []string)) error {
	for _, u := range groupBinding.Users {
		// Ignore the user if the user is being deleted.
		if user, err := c.ksClient.IamV1alpha2().Users().Get(context.Background(), u, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() {
			if changed, groups := operator(user.Spec.Groups, groupBinding.GroupRef.Name); changed {
				if err := c.patchUser(user, groups); err != nil {
					if errors.IsNotFound(err) {
						klog.Infof("user %s doesn't exist any more", u)
						continue
					}
					klog.Error(err)
					return err
				}
			}
		}
	}
	return nil
}

// Update user's Group property. So no need to query user's groups when authorizing.
func (r *Reconciler) updateUserGroups(ctx context.Context, groupBinding *iamv1beta1.GroupBinding, operator func(groups []string, group string) (bool, []string)) error {
	for _, u := range groupBinding.Users {
		// Ignore the user if the user is being deleted.
		user := &iamv1beta1.User{}
		if err := r.Get(ctx, client.ObjectKey{Name: u}, user); err != nil {
			if errors.IsNotFound(err) {
				klog.Infof("user %s doesn't exist any more", u)
				continue
			}
			return err
		}
		if !user.DeletionTimestamp.IsZero() {
			continue
		}
		if changed, groups := operator(user.Spec.Groups, groupBinding.GroupRef.Name); changed {
			if err := r.patchUser(ctx, user, groups); err != nil {
				if errors.IsNotFound(err) {
					klog.Infof("user %s doesn't exist any more", u)
					continue
				}
				klog.Error(err)
				return err
			}
		}
	}
	return nil
}
func (c *Controller) patchUser(user *iamv1alpha2.User, groups []string) error {
func (r *Reconciler) patchUser(ctx context.Context, user *iamv1beta1.User, groups []string) error {
newUser := user.DeepCopy()
newUser.Spec.Groups = groups
patch := client.MergeFrom(user)
patchData, _ := patch.Data(newUser)
if _, err := c.ksClient.IamV1alpha2().Users().
Patch(context.Background(), user.Name, patch.Type(), patchData, metav1.PatchOptions{}); err != nil {
return err
}
return nil
}
func (c *Controller) multiClusterSync(groupBinding *iamv1alpha2.GroupBinding) error {
fedGroupBinding, err := c.federatedGroupBindingLister.Get(groupBinding.Name)
if err != nil {
if errors.IsNotFound(err) {
return c.createFederatedGroupBinding(groupBinding)
}
klog.Error(err)
return err
}
if !reflect.DeepEqual(fedGroupBinding.Spec.Template.GroupRef, groupBinding.GroupRef) ||
!reflect.DeepEqual(fedGroupBinding.Spec.Template.Users, groupBinding.Users) ||
!reflect.DeepEqual(fedGroupBinding.Spec.Template.Labels, groupBinding.Labels) {
fedGroupBinding.Spec.Template.GroupRef = groupBinding.GroupRef
fedGroupBinding.Spec.Template.Users = groupBinding.Users
fedGroupBinding.Spec.Template.Labels = groupBinding.Labels
if _, err = c.ksClient.TypesV1beta1().FederatedGroupBindings().Update(context.Background(), fedGroupBinding, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
func (c *Controller) createFederatedGroupBinding(groupBinding *iamv1alpha2.GroupBinding) error {
federatedGroup := &fedv1beta1types.FederatedGroupBinding{
TypeMeta: metav1.TypeMeta{
Kind: fedv1beta1types.FederatedGroupBindingKind,
APIVersion: fedv1beta1types.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: groupBinding.Name,
},
Spec: fedv1beta1types.FederatedGroupBindingSpec{
Template: fedv1beta1types.GroupBindingTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: groupBinding.Labels,
},
GroupRef: groupBinding.GroupRef,
Users: groupBinding.Users,
},
Placement: fedv1beta1types.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
},
}
// must bind groupBinding lifecycle
err := controllerutil.SetControllerReference(groupBinding, federatedGroup, scheme.Scheme)
if err != nil {
return err
}
if _, err = c.ksClient.TypesV1beta1().FederatedGroupBindings().Create(context.Background(), federatedGroup, metav1.CreateOptions{}); err != nil {
return err
}
return nil
return r.Patch(ctx, newUser, patch)
}
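// The operator argument decides whether the user object needs a patch. A
// minimal sketch of the two callbacks one might pass when binding or
// unbinding a group (hypothetical names; the controller's real callbacks
// are defined alongside bindUser):
func appendGroup(groups []string, group string) (bool, []string) {
	for _, g := range groups {
		if g == group {
			return false, groups // already a member, nothing to patch
		}
	}
	return true, append(groups, group)
}
func removeGroup(groups []string, group string) (bool, []string) {
	result := make([]string, 0, len(groups))
	changed := false
	for _, g := range groups {
		if g == group {
			changed = true
			continue
		}
		result = append(result, g)
	}
	return changed, result
}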

View File

@@ -1,385 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupbinding
import (
"fmt"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
v1alpha2 "kubesphere.io/api/iam/v1alpha2"
fedv1beta1types "kubesphere.io/api/types/v1beta1"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
)
func init() {
v1alpha2.AddToScheme(scheme.Scheme)
}
type fixture struct {
t *testing.T
ksclient *fake.Clientset
k8sclient *k8sfake.Clientset
// Objects to put in the store.
groupBindingLister []*v1alpha2.GroupBinding
//nolint:unused
fedgroupBindingLister []*fedv1beta1types.FederatedGroupBinding
userLister []*v1alpha2.User
// Actions expected to happen on the client.
k8sactions []core.Action
ksactions []core.Action
// Objects from here preloaded into NewSimpleFake.
kubeobjects []runtime.Object
objects []runtime.Object
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
f.kubeobjects = []runtime.Object{}
return f
}
func newGroupBinding(name string, users []string) *v1alpha2.GroupBinding {
return &v1alpha2.GroupBinding{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-binding", name),
},
GroupRef: v1alpha2.GroupRef{
Name: name,
},
Users: users,
}
}
func newUser(name string) *v1alpha2.User {
return &v1alpha2.User{
TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1alpha2.UserSpec{
Email: fmt.Sprintf("%s@kubesphere.io", name),
Lang: "zh-CN",
Description: "fake user",
},
}
}
func newFederatedGroupBinding(groupBinding *iamv1alpha2.GroupBinding) *fedv1beta1types.FederatedGroupBinding {
return &fedv1beta1types.FederatedGroupBinding{
TypeMeta: metav1.TypeMeta{
Kind: fedv1beta1types.FederatedGroupBindingKind,
APIVersion: fedv1beta1types.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: groupBinding.Name,
},
Spec: fedv1beta1types.FederatedGroupBindingSpec{
Template: fedv1beta1types.GroupBindingTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: groupBinding.Labels,
},
GroupRef: groupBinding.GroupRef,
Users: groupBinding.Users,
},
Placement: fedv1beta1types.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
},
}
}
func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
f.ksclient = fake.NewSimpleClientset(f.objects...)
f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
ksinformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc())
k8sinformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc())
for _, groupBinding := range f.groupBindingLister {
err := ksinformers.Iam().V1alpha2().GroupBindings().Informer().GetIndexer().Add(groupBinding)
if err != nil {
f.t.Errorf("add groupBinding:%s", err)
}
}
for _, u := range f.userLister {
err := ksinformers.Iam().V1alpha2().Users().Informer().GetIndexer().Add(u)
if err != nil {
f.t.Errorf("add groupBinding:%s", err)
}
}
c := NewController(f.k8sclient, f.ksclient,
ksinformers.Iam().V1alpha2().GroupBindings(),
ksinformers.Types().V1beta1().FederatedGroupBindings(), true)
c.Synced = []cache.InformerSynced{alwaysReady}
c.recorder = &record.FakeRecorder{}
return c, ksinformers, k8sinformers
}
func (f *fixture) run(userName string) {
f.runController(userName, true, false)
}
//nolint:unused
func (f *fixture) runExpectError(userName string) {
f.runController(userName, true, true)
}
func (f *fixture) runController(groupBinding string, startInformers bool, expectError bool) {
c, i, k8sI := f.newController()
if startInformers {
stopCh := make(chan struct{})
defer close(stopCh)
i.Start(stopCh)
k8sI.Start(stopCh)
}
err := c.reconcile(groupBinding)
if !expectError && err != nil {
f.t.Errorf("error syncing groupBinding: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing groupBinding, got nil")
}
actions := filterInformerActions(f.ksclient.Actions())
for i, action := range actions {
if len(f.ksactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.ksactions), actions[i:])
break
}
expectedAction := f.ksactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.ksactions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.ksactions)-len(actions), f.ksactions[len(actions):])
}
k8sActions := filterInformerActions(f.k8sclient.Actions())
for i, action := range k8sActions {
if len(f.k8sactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.k8sactions), k8sActions[i:])
break
}
expectedAction := f.k8sactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.k8sactions) > len(k8sActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.k8sactions)-len(k8sActions), f.k8sactions[len(k8sActions):])
}
}
// checkAction verifies that expected and actual actions are equal and both have
// the same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
return
}
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
return
}
switch a := actual.(type) {
case core.CreateActionImpl:
e, _ := expected.(core.CreateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.UpdateActionImpl:
e, _ := expected.(core.UpdateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
expUser := expObject.(*v1alpha2.GroupBinding)
groupBinding := object.(*v1alpha2.GroupBinding)
if !reflect.DeepEqual(expUser, groupBinding) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.PatchActionImpl:
e, _ := expected.(core.PatchActionImpl)
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
}
default:
t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
actual.GetVerb(), actual.GetResource().Resource)
}
}
// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state, we can filter them out to
// lower the noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
var ret []core.Action
for _, action := range actions {
// filter out read action
if action.GetVerb() == "watch" || action.GetVerb() == "list" || action.GetVerb() == "get" {
continue
}
ret = append(ret, action)
}
return ret
}
func (f *fixture) expectUpdateGroupsFinalizerAction(groupBinding *v1alpha2.GroupBinding) {
expect := groupBinding.DeepCopy()
expect.Finalizers = []string{"finalizers.kubesphere.io/groupsbindings"}
expect.Labels = map[string]string{constants.KubefedManagedLabel: "false"}
action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect)
f.ksactions = append(f.ksactions, action)
}
func (f *fixture) expectUpdateGroupsDeleteAction(groupBinding *v1alpha2.GroupBinding) {
expect := groupBinding.DeepCopy()
expect.Finalizers = []string{}
action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect)
f.ksactions = append(f.ksactions, action)
}
func (f *fixture) expectPatchUserAction(user *v1alpha2.User, groups []string) {
newUser := user.DeepCopy()
newUser.Spec.Groups = groups
patch := client.MergeFrom(user)
patchData, _ := patch.Data(newUser)
f.ksactions = append(f.ksactions, core.NewPatchAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Resource: "users", Version: "v1alpha2"}, user.Namespace, user.Name, patch.Type(), patchData))
}
func (f *fixture) expectCreateFederatedGroupBindingsAction(groupBinding *v1alpha2.GroupBinding) {
b := newFederatedGroupBinding(groupBinding)
_ = controllerutil.SetControllerReference(groupBinding, b, scheme.Scheme)
actionCreate := core.NewCreateAction(schema.GroupVersionResource{Group: "types.kubefed.io", Version: "v1beta1", Resource: "federatedgroupbindings"}, "", b)
f.ksactions = append(f.ksactions, actionCreate)
}
func getKey(groupBinding *v1alpha2.GroupBinding, t *testing.T) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(groupBinding)
if err != nil {
t.Errorf("Unexpected error getting key for groupBinding %v: %v", groupBinding.Name, err)
return ""
}
return key
}
func TestCreatesGroupBinding(t *testing.T) {
f := newFixture(t)
users := []string{"user1"}
groupbinding := newGroupBinding("test", users)
groupbinding.ObjectMeta.Finalizers = append(groupbinding.ObjectMeta.Finalizers, finalizer)
groupbinding.Labels = map[string]string{constants.KubefedManagedLabel: "false"}
f.groupBindingLister = append(f.groupBindingLister, groupbinding)
f.objects = append(f.objects, groupbinding)
user := newUser("user1")
f.userLister = append(f.userLister, user)
f.objects = append(f.objects, user)
expectGroups := []string{"test"}
f.expectPatchUserAction(user, expectGroups)
f.expectCreateFederatedGroupBindingsAction(groupbinding)
f.run(getKey(groupbinding, t))
}
func TestDeletesGroupBinding(t *testing.T) {
f := newFixture(t)
users := []string{"user1"}
groupbinding := newGroupBinding("test", users)
deletedGroup := groupbinding.DeepCopy()
deletedGroup.Finalizers = append(groupbinding.ObjectMeta.Finalizers, finalizer)
now := metav1.Now()
deletedGroup.ObjectMeta.DeletionTimestamp = &now
f.groupBindingLister = append(f.groupBindingLister, deletedGroup)
f.objects = append(f.objects, deletedGroup)
user := newUser("user1")
user.Spec.Groups = []string{"test"}
f.userLister = append(f.userLister, user)
f.objects = append(f.objects, user)
f.expectPatchUserAction(user, nil)
f.expectUpdateGroupsDeleteAction(deletedGroup)
f.run(getKey(deletedGroup, t))
}
func TestDoNothing(t *testing.T) {
f := newFixture(t)
users := []string{"user1"}
groupBinding := newGroupBinding("test", users)
f.groupBindingLister = append(f.groupBindingLister, groupBinding)
f.objects = append(f.objects, groupBinding)
f.expectUpdateGroupsFinalizerAction(groupBinding)
f.run(getKey(groupBinding, t))
}

View File

@@ -1,93 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"runtime"
"time"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
"github.com/operator-framework/helm-operator-plugins/pkg/annotation"
"github.com/operator-framework/helm-operator-plugins/pkg/reconciler"
"github.com/operator-framework/helm-operator-plugins/pkg/watches"
)
type Reconciler struct {
GatewayOptions *gateway.Options
}
// SetupWithManager creates a reconciler for each helm package defined in the watches file.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
var watchKinds []watches.Watch
ws, err := watches.Load(r.GatewayOptions.WatchesPath)
if err != nil {
return err
}
watchKinds = append(watchKinds, ws...)
for _, w := range watchKinds {
// Register controller with the factory
reconcilePeriod := time.Minute
if w.ReconcilePeriod != nil {
reconcilePeriod = w.ReconcilePeriod.Duration
}
maxConcurrentReconciles := runtime.NumCPU()
if w.MaxConcurrentReconciles != nil {
maxConcurrentReconciles = *w.MaxConcurrentReconciles
}
r, err := reconciler.New(
reconciler.WithChart(*w.Chart),
reconciler.WithGroupVersionKind(w.GroupVersionKind),
reconciler.WithOverrideValues(r.defaultConfiguration(w.OverrideValues)),
reconciler.SkipDependentWatches(w.WatchDependentResources != nil && !*w.WatchDependentResources),
reconciler.WithMaxConcurrentReconciles(maxConcurrentReconciles),
reconciler.WithReconcilePeriod(reconcilePeriod),
reconciler.WithInstallAnnotations(annotation.DefaultInstallAnnotations...),
reconciler.WithUpgradeAnnotations(annotation.DefaultUpgradeAnnotations...),
reconciler.WithUninstallAnnotations(annotation.DefaultUninstallAnnotations...),
)
if err != nil {
return err
}
if err := r.SetupWithManager(mgr); err != nil {
return err
}
klog.Infoln("configured watch", "gvk", w.GroupVersionKind, "chartPath", w.ChartPath, "maxConcurrentReconciles", maxConcurrentReconciles, "reconcilePeriod", reconcilePeriod)
}
return nil
}
func (r *Reconciler) defaultConfiguration(overrideValues map[string]string) map[string]string {
if overrideValues == nil {
//vendor/github.com/operator-framework/helm-operator-plugins/pkg/watches/watches.go:85-87
overrideValues = make(map[string]string)
}
if r.GatewayOptions.Repository != "" {
overrideValues["controller.image.repository"] = r.GatewayOptions.Repository
}
if r.GatewayOptions.Tag != "" {
overrideValues["controller.image.tag"] = r.GatewayOptions.Tag
}
return overrideValues
}
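// A minimal usage sketch (assumed values, not from this commit): the gateway
// image settings from the options are layered on top of any overrides
// declared in the watches file.
//
//	r := &Reconciler{GatewayOptions: &gateway.Options{
//		Repository: "registry.example.com/ingress-nginx",
//		Tag:        "v1.1.0",
//	}}
//	overrides := r.defaultConfiguration(nil)
//	// overrides["controller.image.repository"] == "registry.example.com/ingress-nginx"
//	// overrides["controller.image.tag"] == "v1.1.0"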

View File

@@ -1,85 +0,0 @@
/*
Copyright 2021 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"os"
"path/filepath"
"testing"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var testEnv *envtest.Environment
func TestApplicationController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Application Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "ks-core", "crds")},
AttachControlPlaneOutput: false,
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
var _ = Context("Helm reconciler", func() {
Describe("Gateway", func() {
It("Should setup gateway helm reconcier", func() {
data := "- group: gateway.kubesphere.io\n version: v1alpha1\n kind: Gateway\n chart: ../../../config/gateway\n"
f, _ := os.CreateTemp("", "watch")
os.WriteFile(f.Name(), []byte(data), 0)
mgr, err := ctrl.NewManager(cfg, ctrl.Options{MetricsBindAddress: "0"})
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
reconciler := &Reconciler{GatewayOptions: &gateway.Options{WatchesPath: f.Name()}}
err = reconciler.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup helm reconciler")
})
})
})

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package job
@@ -21,173 +10,74 @@ import (
"encoding/json"
"fmt"
"reflect"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"time"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
batchv1informers "k8s.io/client-go/informers/batch/v1"
batchv1listers "k8s.io/client-go/listers/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const (
// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
// sequence of delays between successive queuings of a service.
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
revisionsAnnotationKey = "revisions"
controllerName = "job-revision"
)
type JobController struct {
client clientset.Interface
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
jobLister batchv1listers.JobLister
jobSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
workerLoopPeriod time.Duration
type Reconciler struct {
client.Client
}
func NewJobController(jobInformer batchv1informers.JobInformer, client clientset.Interface) *JobController {
v := &JobController{
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
workerLoopPeriod: time.Second,
}
v.jobLister = jobInformer.Lister()
v.jobSynced = jobInformer.Informer().HasSynced
jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: v.enqueueJob,
UpdateFunc: func(old, cur interface{}) {
v.enqueueJob(cur)
},
})
return v
func (r *Reconciler) Name() string {
return controllerName
}
func (v *JobController) Start(ctx context.Context) error {
return v.Run(5, ctx.Done())
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(
&batchv1.Job{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
WithOptions(controller.Options{
MaxConcurrentReconciles: 2,
}).
Complete(r)
}
func (v *JobController) Run(workers int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer v.queue.ShutDown()
klog.Info("starting job controller")
defer klog.Info("shutting down job controller")
if !cache.WaitForCacheSync(stopCh, v.jobSynced) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < workers; i++ {
go wait.Until(v.worker, v.workerLoopPeriod, stopCh)
}
<-stopCh
return nil
}
func (v *JobController) enqueueJob(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
return
}
v.queue.Add(key)
}
func (v *JobController) worker() {
for v.processNextWorkItem() {
}
}
func (v *JobController) processNextWorkItem() bool {
eKey, quit := v.queue.Get()
if quit {
return false
}
defer v.queue.Done(eKey)
err := v.syncJob(eKey.(string))
v.handleErr(err, eKey)
return true
}
// main function of the reconcile for job
// job's name is same with the service that created it
func (v *JobController) syncJob(key string) error {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
startTime := time.Now()
defer func() {
klog.V(4).Info("Finished syncing job.", "key", key, "duration", time.Since(startTime))
klog.V(4).Info("Finished syncing job.", "key", req.String(), "duration", time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
job := &batchv1.Job{}
if err := r.Get(ctx, req.NamespacedName, job); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
job, err := v.jobLister.Jobs(namespace).Get(name)
if err != nil {
// has been deleted
if errors.IsNotFound(err) {
return nil
}
klog.Error(err, "get job failed", "namespace", namespace, "name", name)
return err
if err := r.makeRevision(ctx, job); err != nil {
klog.Error(err, "make job revision failed", "namespace", req.Namespace, "name", req.Name)
return ctrl.Result{}, err
}
err = v.makeRevision(job)
if err != nil {
klog.Error(err, "make job revision failed", "namespace", namespace, "name", name)
return err
}
return nil
return ctrl.Result{}, nil
}
func (v *JobController) handleErr(err error, key interface{}) {
if err == nil {
v.queue.Forget(key)
return
}
if v.queue.NumRequeues(key) < maxRetries {
klog.V(2).Info("Error syncing job, retrying.", "key", key, "error", err)
v.queue.AddRateLimited(key)
return
}
klog.V(4).Info("Dropping job out of the queue", "key", key, "error", err)
v.queue.Forget(key)
utilruntime.HandleError(err)
}
func (v *JobController) makeRevision(job *batchv1.Job) error {
func (r *Reconciler) makeRevision(ctx context.Context, job *batchv1.Job) error {
revisionIndex := -1
revisions, err := v.getRevisions(job)
revisions, err := r.getRevisions(job)
// failed get revisions
if err != nil {
return nil
@@ -196,7 +86,7 @@ func (v *JobController) makeRevision(job *batchv1.Job) error {
uid := job.UID
for index, revision := range revisions {
if revision.Uid == string(uid) {
currentRevision := v.getCurrentRevision(job)
currentRevision := r.getCurrentRevision(job)
if reflect.DeepEqual(currentRevision, revision) {
return nil
} else {
@@ -210,7 +100,7 @@ func (v *JobController) makeRevision(job *batchv1.Job) error {
revisionIndex = len(revisions) + 1
}
revisions[revisionIndex] = v.getCurrentRevision(job)
revisions[revisionIndex] = r.getCurrentRevision(job)
revisionsByte, err := json.Marshal(revisions)
if err != nil {
@@ -221,17 +111,11 @@ func (v *JobController) makeRevision(job *batchv1.Job) error {
if job.Annotations == nil {
job.Annotations = make(map[string]string)
}
job.Annotations[revisionsAnnotationKey] = string(revisionsByte)
_, err = v.client.BatchV1().Jobs(job.Namespace).Update(context.Background(), job, metav1.UpdateOptions{})
if err != nil {
return err
}
return nil
return r.Update(ctx, job)
}
func (v *JobController) getRevisions(job *batchv1.Job) (JobRevisions, error) {
func (r *Reconciler) getRevisions(job *batchv1.Job) (JobRevisions, error) {
revisions := make(JobRevisions)
if revisionsStr := job.Annotations[revisionsAnnotationKey]; revisionsStr != "" {
@@ -244,14 +128,14 @@ func (v *JobController) getRevisions(job *batchv1.Job) (JobRevisions, error) {
return revisions, nil
}
func (v *JobController) getCurrentRevision(item *batchv1.Job) JobRevision {
func (r *Reconciler) getCurrentRevision(item *batchv1.Job) JobRevision {
var revision JobRevision
for _, condition := range item.Status.Conditions {
if condition.Type == batchv1.JobFailed && condition.Status == v1.ConditionTrue {
if condition.Type == batchv1.JobFailed && condition.Status == corev1.ConditionTrue {
revision.Status = Failed
revision.Reasons = append(revision.Reasons, condition.Reason)
revision.Messages = append(revision.Messages, condition.Message)
} else if condition.Type == batchv1.JobComplete && condition.Status == v1.ConditionTrue {
} else if condition.Type == batchv1.JobComplete && condition.Status == corev1.ConditionTrue {
revision.Status = Completed
}
}
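// The JobRevision/JobRevisions shapes are defined elsewhere in this package;
// a sketch consistent with how they are used above (field types assumed):
//
//	type JobRevision struct {
//		Status   string   `json:"status"`
//		Reasons  []string `json:"reasons"`
//		Messages []string `json:"messages"`
//		Uid      string   `json:"uid"`
//	}
//
//	// JobRevisions is keyed by the revision index and stored as JSON in the
//	// job's "revisions" annotation.
//	type JobRevisions map[int]JobRevision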

View File

@@ -1,72 +1,25 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package job
import (
"reflect"
"context"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"kubesphere.io/kubesphere/pkg/scheme"
)
var (
noResyncPeriodFunc = func() time.Duration { return 0 }
)
type fixture struct {
t *testing.T
kubeclient *k8sfake.Clientset
//nolint:unused
jobController *JobController
jobLister []*batchv1.Job
//nolint:unused
kubeactions []core.Action
actions []core.Action
kubeobjects []runtime.Object
objects []runtime.Object
}
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "jobs") ||
action.Matches("watch", "jobs")) {
continue
}
ret = append(ret, action)
}
return ret
}
func newJob(name string, spec batchv1.JobSpec) *batchv1.Job {
job := &batchv1.Job{
return &batchv1.Job{
TypeMeta: metav1.TypeMeta{APIVersion: batchv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Name: name,
@@ -74,136 +27,35 @@ func newJob(name string, spec batchv1.JobSpec) *batchv1.Job {
},
Spec: spec,
}
return job
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
f.kubeobjects = []runtime.Object{}
return f
}
func checkAction(expected, actual core.Action, t *testing.T) {
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
return
}
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
return
}
switch a := actual.(type) {
case core.CreateActionImpl:
e, _ := expected.(core.CreateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.UpdateActionImpl:
e, _ := expected.(core.UpdateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.PatchActionImpl:
e, _ := expected.(core.PatchActionImpl)
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
}
default:
t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
actual.GetVerb(), actual.GetResource().Resource)
}
}
func (f *fixture) newController() (*JobController, kubeinformers.SharedInformerFactory) {
f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
jobController := NewJobController(k8sI.Batch().V1().Jobs(), f.kubeclient)
for _, job := range f.jobLister {
_ = k8sI.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
}
return jobController, k8sI
}
func (f *fixture) runController(jobName string, startInformers bool, expectError bool) {
c, k8sI := f.newController()
if startInformers {
stopCh := make(chan struct{})
defer close(stopCh)
k8sI.Start(stopCh)
}
err := c.syncJob(jobName)
if !expectError && err != nil {
f.t.Errorf("error syncing job: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing job, got nil")
}
actions := filterInformerActions(f.kubeclient.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
func (f *fixture) expectAddAnnotationAction(job *batchv1.Job) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "jobs"}, job.Namespace, job)
f.actions = append(f.actions, action)
}
func (f *fixture) run(jobName string) {
f.runController(jobName, true, false)
}
func TestAddAnnotation(t *testing.T) {
f := newFixture(t)
job := newJob("test", batchv1.JobSpec{})
f.jobLister = append(f.jobLister, job)
f.objects = append(f.objects, job)
fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(job).Build()
f.kubeobjects = append(f.kubeobjects, job)
reconciler := &Reconciler{}
reconciler.Client = fakeClient
f.expectAddAnnotationAction(job)
f.run(getKey(job, t))
}
func getKey(job *batchv1.Job, t *testing.T) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(job)
if err != nil {
t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
return ""
tests := []struct {
name string
req types.NamespacedName
isErr bool
}{
{
name: "normal test",
req: types.NamespacedName{
Namespace: job.Namespace,
Name: job.Name,
},
isErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: tt.req}); tt.isErr != (err != nil) {
t.Errorf("%s Reconcile() unexpected error: %v", tt.name, err)
}
})
}
return key
}

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package job

View File

@@ -1,21 +1,7 @@
// Copyright 2022 The KubeSphere Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package application
package k8sapplication
import (
corev1 "k8s.io/api/core/v1"

View File

@@ -1,20 +1,9 @@
/*
Copyright 2020 KubeSphere Authors
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
package k8sapplication
import (
"context"
@@ -35,6 +24,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -44,22 +34,83 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
servicemeshv1alpha2 "kubesphere.io/api/servicemesh/v1alpha2"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
// ApplicationReconciler reconciles a Application object
type ApplicationReconciler struct {
const controllerName = "k8sapplication"
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
// Reconciler reconciles a Application object
type Reconciler struct {
client.Client
Mapper meta.RESTMapper
Scheme *runtime.Scheme
ApplicationSelector labels.Selector //
ApplicationSelector labels.Selector
}
func (r *ApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Name() string {
return controllerName
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.Mapper = mgr.GetRESTMapper()
r.Scheme = mgr.GetScheme()
selector, err := labels.Parse(mgr.Options.ComposedAppOptions.AppSelector)
if err != nil {
return err
}
r.ApplicationSelector = selector
c, err := ctrl.NewControllerManagedBy(mgr).
For(&appv1beta1.Application{}).
Build(r)
if err != nil {
return err
}
sources := []client.Object{
&v1.Deployment{},
&corev1.Service{},
&v1.StatefulSet{},
&networkv1.Ingress{},
}
for _, s := range sources {
// Watch component resources and enqueue the Application that owns them
err = c.Watch(
source.Kind(mgr.GetCache(), s),
handler.EnqueueRequestsFromMapFunc(
func(ctx context.Context, obj client.Object) []reconcile.Request {
return []reconcile.Request{{NamespacedName: types.NamespacedName{
Name: GetApplictionName(obj.GetLabels()),
Namespace: obj.GetNamespace()}}}
}),
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return isApp(e.ObjectOld, e.ObjectNew)
},
CreateFunc: func(e event.CreateEvent) bool {
return isApp(e.Object)
},
DeleteFunc: func(e event.DeleteEvent) bool {
return isApp(e.Object)
},
})
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var app appv1beta1.Application
err := r.Get(context.Background(), req.NamespacedName, &app)
err := r.Get(ctx, req.NamespacedName, &app)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
@@ -81,19 +132,19 @@ func (r *ApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}
resources, errs := r.updateComponents(context.Background(), &app)
newApplicationStatus := r.getNewApplicationStatus(context.Background(), &app, resources, &errs)
resources, errs := r.updateComponents(ctx, &app)
newApplicationStatus := r.getNewApplicationStatus(ctx, &app, resources, &errs)
newApplicationStatus.ObservedGeneration = app.Generation
if equality.Semantic.DeepEqual(newApplicationStatus, &app.Status) {
return ctrl.Result{}, nil
}
err = r.updateApplicationStatus(context.Background(), req.NamespacedName, newApplicationStatus)
err = r.updateApplicationStatus(ctx, req.NamespacedName, newApplicationStatus)
return ctrl.Result{}, err
}
func (r *ApplicationReconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
func (r *Reconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
var errs []error
resources := r.fetchComponentListResources(ctx, app.Spec.ComponentGroupKinds, app.Spec.Selector, app.Namespace, &errs)
@@ -107,7 +158,7 @@ func (r *ApplicationReconciler) updateComponents(ctx context.Context, app *appv1
return resources, errs
}
func (r *ApplicationReconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
func (r *Reconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
objectStatuses := r.objectStatuses(ctx, resources, errList)
errs := utilerrors.NewAggregate(*errList)
@@ -135,7 +186,7 @@ func (r *ApplicationReconciler) getNewApplicationStatus(ctx context.Context, app
return newApplicationStatus
}
func (r *ApplicationReconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
func (r *Reconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
var resources []*unstructured.Unstructured
if selector == nil {
@@ -169,7 +220,7 @@ func (r *ApplicationReconciler) fetchComponentListResources(ctx context.Context,
return resources
}
func (r *ApplicationReconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
func (r *Reconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
for _, resource := range resources {
ownerRefs := resource.GetOwnerReferences()
ownerRefFound := false
@@ -198,7 +249,7 @@ func (r *ApplicationReconciler) setOwnerRefForResources(ctx context.Context, own
return nil
}
func (r *ApplicationReconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
func (r *Reconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
var objectStatuses []appv1beta1.ObjectStatus
for _, resource := range resources {
os := appv1beta1.ObjectStatus{
@@ -232,7 +283,7 @@ func aggregateReady(objectStatuses []appv1beta1.ObjectStatus) (bool, int) {
return false, countReady
}
func (r *ApplicationReconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
func (r *Reconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
original := &appv1beta1.Application{}
if err := r.Get(ctx, nn, original); err != nil {
@@ -249,57 +300,9 @@ func (r *ApplicationReconciler) updateApplicationStatus(ctx context.Context, nn
return nil
}
func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
c, err := ctrl.NewControllerManagedBy(mgr).
Named("application-controller").
For(&appv1beta1.Application{}).Build(r)
if err != nil {
return err
}
sources := []client.Object{
&v1.Deployment{},
&corev1.Service{},
&v1.StatefulSet{},
&networkv1.Ingress{},
&servicemeshv1alpha2.ServicePolicy{},
&servicemeshv1alpha2.Strategy{},
}
for _, s := range sources {
// Watch for changes to Application
err = c.Watch(
&source.Kind{Type: s},
handler.EnqueueRequestsFromMapFunc(
func(h client.Object) []reconcile.Request {
return []reconcile.Request{{NamespacedName: types.NamespacedName{
Name: servicemesh.GetApplictionName(h.GetLabels()),
Namespace: h.GetNamespace()}}}
}),
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return isApp(e.ObjectOld, e.ObjectOld)
},
CreateFunc: func(e event.CreateEvent) bool {
return isApp(e.Object)
},
DeleteFunc: func(e event.DeleteEvent) bool {
return isApp(e.Object)
},
})
if err != nil {
return err
}
}
return nil
}
var _ reconcile.Reconciler = &ApplicationReconciler{}
func isApp(obs ...metav1.Object) bool {
for _, o := range obs {
if o.GetLabels() != nil && servicemesh.IsAppComponent(o.GetLabels()) {
if o.GetLabels() != nil && IsAppComponent(o.GetLabels()) {
return true
}
}

View File

@@ -1,7 +1,7 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package application
package k8sapplication
import (
"strings"

View File

@@ -0,0 +1,102 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package k8sapplication
import (
"strings"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
AppLabel = "app"
VersionLabel = "version"
ApplicationNameLabel = "app.kubernetes.io/name"
ApplicationVersionLabel = "app.kubernetes.io/version"
)
// Resources with the following labels are considered part of the service mesh.
var ApplicationLabels = [...]string{
ApplicationNameLabel,
ApplicationVersionLabel,
AppLabel,
}
// Resources with the following labels are considered part of a kubernetes-sigs/application.
var AppLabels = [...]string{
ApplicationNameLabel,
ApplicationVersionLabel,
}
var TrimChars = [...]string{".", "_", "-"}
// NormalizeVersionName normalizes a version name by stripping the characters [_.-].
func NormalizeVersionName(version string) string {
for _, char := range TrimChars {
version = strings.ReplaceAll(version, char, "")
}
return version
}
func GetApplictionName(lbs map[string]string) string {
if name, ok := lbs[ApplicationNameLabel]; ok {
return name
}
return ""
}
func GetComponentName(meta *v1.ObjectMeta) string {
if len(meta.Labels[AppLabel]) > 0 {
return meta.Labels[AppLabel]
}
return ""
}
func GetComponentVersion(meta *v1.ObjectMeta) string {
if len(meta.Labels[VersionLabel]) > 0 {
return meta.Labels[VersionLabel]
}
return ""
}
func ExtractApplicationLabels(meta *v1.ObjectMeta) map[string]string {
labels := make(map[string]string, len(ApplicationLabels))
for _, label := range ApplicationLabels {
if _, ok := meta.Labels[label]; !ok {
return nil
} else {
labels[label] = meta.Labels[label]
}
}
return labels
}
func IsApplicationComponent(lbs map[string]string) bool {
for _, label := range ApplicationLabels {
if _, ok := lbs[label]; !ok {
return false
}
}
return true
}
// IsAppComponent reports whether the labels mark a kubernetes-sigs/application component.
func IsAppComponent(lbs map[string]string) bool {
for _, label := range AppLabels {
if _, ok := lbs[label]; !ok {
return false
}
}
return true
}
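// Usage sketch (hypothetical labels, not from this commit):
//
//	lbs := map[string]string{
//		"app.kubernetes.io/name":    "bookinfo",
//		"app.kubernetes.io/version": "v1",
//		"app":                       "reviews",
//	}
//	IsApplicationComponent(lbs)      // true: all of ApplicationLabels are present
//	IsAppComponent(lbs)              // true: all of AppLabels are present
//	GetApplictionName(lbs)           // "bookinfo"
//	NormalizeVersionName("v1.2_3-4") // "v1234"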

View File

@@ -0,0 +1,228 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package ksserviceaccount
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/go-logr/logr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const (
AnnotationServiceAccountName = "kubesphere.io/serviceaccount-name"
ServiceAccountVolumeName = "kubesphere-service-account"
VolumeMountPath = "/var/run/secrets/kubesphere.io/serviceaccount"
caCertName = "kubesphere-root-ca.crt"
caCertKey = "ca.crt"
)
const webhookName = "service-account-injector"
func (w *Webhook) Name() string {
return webhookName
}
var _ kscontroller.Controller = &Webhook{}
type Webhook struct {
}
func (w *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
serviceAccountPodInjector := &PodInjector{
Log: mgr.GetLogger(),
Decoder: admission.NewDecoder(mgr.GetScheme()),
Client: mgr.GetClient(),
tls: mgr.Options.KubeSphereOptions.TLS,
}
mgr.GetWebhookServer().Register("/serviceaccount-pod-injector", &webhook.Admission{Handler: serviceAccountPodInjector})
return nil
}
// PodInjector injects a token (issue by KubeSphere) by mounting a secret to the
// pod when the pod specifies a KubeSphere service account.
type PodInjector struct {
client.Client
Log logr.Logger
Decoder *admission.Decoder
tls bool
}
func (i *PodInjector) Handle(ctx context.Context, req admission.Request) admission.Response {
log := i.Log.WithValues("namespace", req.Namespace).WithValues("pod", req.Name)
var saName string
pod := &v1.Pod{}
err := i.Decoder.Decode(req, pod)
if err != nil {
log.Error(err, "decode pod failed")
return admission.Errored(http.StatusInternalServerError, err)
}
if pod.Annotations != nil {
saName = pod.Annotations[AnnotationServiceAccountName]
}
if saName == "" {
log.V(6).Info("pod does not specify kubesphere service account, skip it")
return admission.Allowed("")
}
sa := &corev1alpha1.ServiceAccount{}
err = i.Client.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: saName}, sa)
if err != nil {
if errors.IsNotFound(err) {
return admission.Errored(http.StatusNotFound, err)
}
return admission.Errored(http.StatusInternalServerError, err)
}
err = i.injectServiceAccountTokenAndCACert(ctx, pod, sa)
if err != nil {
log.WithValues("serviceaccount", sa.Name).
Error(err, "inject service account token to pod failed")
if errors.IsNotFound(err) {
return admission.Errored(http.StatusNotFound, err)
}
return admission.Errored(http.StatusInternalServerError, err)
}
log.V(6).WithValues("serviceaccount", sa.Name).
Info("successfully injected service account token into the pod")
marshal, err := json.Marshal(pod)
if err != nil {
log.Error(err, "marshal pod to json failed")
return admission.Errored(http.StatusInternalServerError, err)
}
return admission.PatchResponseFromRaw(req.Object.Raw, marshal)
}
func (i *PodInjector) injectServiceAccountTokenAndCACert(ctx context.Context, pod *v1.Pod, sa *corev1alpha1.ServiceAccount) error {
var tokenName string
if len(sa.Secrets) == 0 {
return fmt.Errorf("can not find any token in service account: %s", sa.Name)
}
// just select the first token, since a service account usually has only one
tokenName = sa.Secrets[0].Name
secret := &v1.Secret{}
err := i.Client.Get(ctx, types.NamespacedName{Namespace: sa.Namespace, Name: tokenName}, secret)
if err != nil {
return err
}
volume := v1.Volume{
Name: ServiceAccountVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: tokenName,
},
Items: []v1.KeyToPath{
{
Key: corev1alpha1.ServiceAccountToken,
Path: corev1alpha1.ServiceAccountToken,
},
},
},
},
},
},
},
}
if i.tls {
if err = i.createCertConfigMap(ctx, pod.Namespace); err != nil {
return err
}
volume.Projected.Sources = append(volume.Projected.Sources, v1.VolumeProjection{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: caCertName,
},
Items: []v1.KeyToPath{
{
Key: caCertKey,
Path: caCertKey,
},
},
},
})
}
volumeMount := v1.VolumeMount{
Name: ServiceAccountVolumeName,
ReadOnly: true,
MountPath: VolumeMountPath,
}
if pod.Spec.Volumes == nil {
volumes := make([]v1.Volume, 0)
pod.Spec.Volumes = volumes
}
pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
// mount to every container
for i, c := range pod.Spec.Containers {
volumeMounts := append(c.VolumeMounts, volumeMount)
pod.Spec.Containers[i].VolumeMounts = volumeMounts
}
// mount to init container
for i, c := range pod.Spec.InitContainers {
volumeMounts := append(c.VolumeMounts, volumeMount)
pod.Spec.InitContainers[i].VolumeMounts = volumeMounts
}
return nil
}
func (i *PodInjector) createCertConfigMap(ctx context.Context, namespace string) error {
cm := &v1.ConfigMap{}
if err := i.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: caCertName}, cm); err != nil {
if errors.IsNotFound(err) {
secret := &v1.Secret{}
if err = i.Client.Get(ctx, types.NamespacedName{
Namespace: constants.KubeSphereNamespace,
Name: "ks-apiserver-tls-certs",
}, secret); err != nil {
return err
}
cm = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: caCertName,
},
BinaryData: map[string][]byte{
caCertKey: secret.Data[caCertKey],
},
}
return i.Client.Create(ctx, cm)
}
return err
}
return nil
}
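// A minimal sketch (hypothetical pod, not from this commit) of how a pod
// opts in to injection: the webhook only acts when the pod is annotated
// with the name of a KubeSphere ServiceAccount in its own namespace.
//
//	pod := &v1.Pod{
//		ObjectMeta: metav1.ObjectMeta{
//			Name:      "demo",
//			Namespace: "default",
//			Annotations: map[string]string{
//				AnnotationServiceAccountName: "my-serviceaccount",
//			},
//		},
//	}
//
// After admission, every container (and init container) gets a read-only
// projected volume at /var/run/secrets/kubesphere.io/serviceaccount holding
// the account's token, plus the root CA certificate when TLS is enabled.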

View File

@@ -0,0 +1,181 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package ksserviceaccount
import (
"context"
"fmt"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1alpha1 "kubesphere.io/api/core/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
)
const (
controllerName = "ks-serviceaccount"
finalizer = "finalizers.kubesphere.io/serviceaccount"
messageCreateSecretSuccessfully = "Create token secret successfully"
reasonInvalidSecret = "InvalidSecret"
)
var _ kscontroller.Controller = &Reconciler{}
func (r *Reconciler) Name() string {
return controllerName
}
type Reconciler struct {
client.Client
Scheme *runtime.Scheme
Logger logr.Logger
EventRecorder record.EventRecorder
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.EventRecorder = mgr.GetEventRecorderFor(controllerName)
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
return builder.
ControllerManagedBy(mgr).
For(
&corev1alpha1.ServiceAccount{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
WithOptions(controller.Options{
MaxConcurrentReconciles: 2,
}).
Complete(r)
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcile.Result, error) {
logger := r.Logger.WithValues(req.NamespacedName, "ServiceAccount")
sa := &corev1alpha1.ServiceAccount{}
if err := r.Get(ctx, req.NamespacedName, sa); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if sa.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(sa, finalizer) {
deepCopy := sa.DeepCopy()
deepCopy.Finalizers = append(deepCopy.Finalizers, finalizer)
if len(sa.Secrets) == 0 {
secretCreated, err := r.createTokenSecret(ctx, sa)
if err != nil {
logger.Error(err, "create secret failed")
return ctrl.Result{}, err
}
logger.V(4).WithName(secretCreated.Name).Info("secret created successfully")
deepCopy.Secrets = append(deepCopy.Secrets, v1.ObjectReference{
Namespace: secretCreated.Namespace,
Name: secretCreated.Name,
})
r.EventRecorder.Event(deepCopy, corev1.EventTypeNormal, kscontroller.Synced, messageCreateSecretSuccessfully)
}
if err := r.Update(ctx, deepCopy); err != nil {
logger.Error(err, "update serviceaccount failed")
return ctrl.Result{}, err
}
}
} else {
if controllerutil.ContainsFinalizer(sa, finalizer) {
if err := r.deleteSecretToken(ctx, sa, logger); err != nil {
logger.Error(err, "delete secret failed")
return ctrl.Result{}, err
}
_ = controllerutil.RemoveFinalizer(sa, finalizer)
if err := r.Update(ctx, sa); err != nil {
logger.Error(err, "update serviceaccount failed")
return ctrl.Result{}, err
}
}
}
if err := r.checkAllSecret(ctx, sa); err != nil {
logger.Error(err, "failed check secrets")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *Reconciler) createTokenSecret(ctx context.Context, sa *corev1alpha1.ServiceAccount) (*v1.Secret, error) {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("%s-", sa.Name),
Namespace: sa.Namespace,
Annotations: map[string]string{corev1alpha1.ServiceAccountName: sa.Name},
},
Type: corev1alpha1.SecretTypeServiceAccountToken,
}
return secret, r.Client.Create(ctx, secret)
}
func (r *Reconciler) deleteSecretToken(ctx context.Context, sa *corev1alpha1.ServiceAccount, logger logr.Logger) error {
for _, secretName := range sa.Secrets {
secret := &v1.Secret{}
if err := r.Get(ctx, client.ObjectKey{Namespace: secretName.Namespace, Name: secretName.Name}, secret); err != nil {
if errors.IsNotFound(err) {
continue
} else {
return err
}
}
if err := r.checkSecretToken(secret, sa.Name); err == nil {
if err = r.Delete(ctx, secret); err != nil {
return err
}
logger.V(2).WithName(secretName.Name).Info("delete secret successfully")
}
}
return nil
}
func (r *Reconciler) checkAllSecret(ctx context.Context, sa *corev1alpha1.ServiceAccount) error {
for _, secretRef := range sa.Secrets {
secret := &v1.Secret{}
if err := r.Get(ctx, client.ObjectKey{Namespace: sa.Namespace, Name: secretRef.Name}, secret); err != nil {
if errors.IsNotFound(err) {
r.EventRecorder.Event(sa, corev1.EventTypeWarning, reasonInvalidSecret, err.Error())
continue
}
return err
}
if err := r.checkSecretToken(secret, sa.Name); err != nil {
r.EventRecorder.Event(sa, corev1.EventTypeWarning, reasonInvalidSecret, err.Error())
}
}
return nil
}
// checkSecretToken checks whether the secret holds a valid token; invalid token references will be deleted
func (r *Reconciler) checkSecretToken(secret *v1.Secret, subjectName string) error {
if secret.Type != corev1alpha1.SecretTypeServiceAccountToken {
return fmt.Errorf("unsupported secret %s type: %s", secret.Name, secret.Type)
}
if saName := secret.Annotations[corev1alpha1.ServiceAccountName]; saName != subjectName {
return fmt.Errorf("incorrect subject name %s", saName)
}
return nil
}
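// Sketch (hypothetical secret, not from this commit): checkSecretToken
// rejects anything that is not a KubeSphere service-account token secret or
// that is annotated with a different account name.
//
//	secret := &v1.Secret{
//		ObjectMeta: metav1.ObjectMeta{
//			Annotations: map[string]string{corev1alpha1.ServiceAccountName: "demo"},
//		},
//		Type: corev1alpha1.SecretTypeServiceAccountToken,
//	}
//	_ = r.checkSecretToken(secret, "demo")  // nil: type and subject match
//	_ = r.checkSecretToken(secret, "other") // error: incorrect subject name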

View File

@@ -0,0 +1,249 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package kubeconfig
import (
"bytes"
"context"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"time"
certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
"kubesphere.io/kubesphere/pkg/utils/pkiutil"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
controllerName = "kubeconfig"
residual = 30 * 24 * time.Hour
)
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
// Reconciler reconciles a User object
type Reconciler struct {
client.Client
config *rest.Config
}
func (r *Reconciler) Name() string {
return controllerName
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.config = mgr.K8sClient.Config()
return ctrl.NewControllerManagedBy(mgr).
Named(controllerName).
WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
For(&corev1.Secret{},
builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
secret := object.(*corev1.Secret)
return secret.Namespace == constants.KubeSphereNamespace &&
secret.Type == kubeconfig.SecretTypeKubeConfig &&
secret.Labels[constants.UsernameLabelKey] != ""
}))).
Complete(r)
}
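// Reconcile keeps the kubeconfig secret up to date, skipping secrets that are being deleted.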
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
secret := &corev1.Secret{}
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !secret.ObjectMeta.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}
if err := r.UpdateSecret(ctx, secret); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
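// UpdateSecret rewrites the kubeconfig payload and requests a new client certificate
// when the embedded certificate is missing or close to expiry.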
func (r *Reconciler) UpdateSecret(ctx context.Context, secret *corev1.Secret) error {
// already exists and the client certificate will not expire within the residual period (30 days)
if isValid(secret) {
return nil
}
// create a new CSR
var ca []byte
var err error
if len(r.config.CAData) > 0 {
ca = r.config.CAData
} else {
ca, err = os.ReadFile(kubeconfig.InClusterCAFilePath)
if err != nil {
klog.Errorf("Failed to read CA file: %v", err)
return err
}
}
username := secret.Labels[constants.UsernameLabelKey]
currentContext := fmt.Sprintf("%s@%s", username, kubeconfig.DefaultClusterName)
config := clientcmdapi.Config{
Kind: "Config",
APIVersion: "v1",
Preferences: clientcmdapi.Preferences{},
Clusters: map[string]*clientcmdapi.Cluster{kubeconfig.DefaultClusterName: {
Server: r.config.Host,
InsecureSkipTLSVerify: false,
CertificateAuthorityData: ca,
}},
Contexts: map[string]*clientcmdapi.Context{currentContext: {
Cluster: kubeconfig.DefaultClusterName,
AuthInfo: username,
Namespace: kubeconfig.DefaultNamespace,
}},
AuthInfos: make(map[string]*clientcmdapi.AuthInfo),
CurrentContext: currentContext,
}
data, err := clientcmd.Write(config)
if err != nil {
klog.Errorf("Failed to write kubeconfig for user %s: %v", username, err)
return err
}
if secret.Annotations == nil {
secret.Annotations = make(map[string]string)
}
secret.Data = map[string][]byte{kubeconfig.FileName: data}
if err = r.Update(ctx, secret); err != nil {
klog.Errorf("Failed to update kubeconfig for user %s: %v", username, err)
return err
}
if err = r.createCSR(ctx, username); err != nil {
klog.Errorf("Failed to create CSR for user %s: %v", username, err)
return err
}
return nil
}
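// isValid reports whether the secret already contains a kubeconfig whose client
// certificate remains valid beyond the residual period or whose CSR is still being processed.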
func isValid(secret *corev1.Secret) bool {
username := secret.Labels[constants.UsernameLabelKey]
data := secret.Data[kubeconfig.FileName]
if len(data) == 0 {
return false
}
config, err := clientcmd.Load(data)
if err != nil {
klog.Warningf("Failed to load kubeconfig for user %s: %v", username, err)
return false
}
if authInfo, ok := config.AuthInfos[username]; ok {
clientCert, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
if err != nil {
klog.Warningf("Failed to parse client certificate for user %s: %v", username, err)
return false
}
for _, cert := range clientCert {
if cert.NotAfter.After(time.Now().Add(residual)) {
return true
}
}
} else {
// a CSR for this user is still being processed; treat the kubeconfig as valid for now
return true
}
return false
}
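// createCSR generates a private key and certificate signing request for the user,
// stores the PEM-encoded key in an annotation on the CSR object, and submits the
// CSR to the Kubernetes API.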
func (r *Reconciler) createCSR(ctx context.Context, username string) error {
csrConfig := &certutil.Config{
CommonName: username,
Organization: nil,
AltNames: certutil.AltNames{},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
x509csr, x509key, err := pkiutil.NewCSRAndKey(csrConfig)
if err != nil {
klog.Errorf("Failed to create CSR and key for user %s: %v", username, err)
return err
}
var csrBuffer, keyBuffer bytes.Buffer
if err = pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
klog.Errorf("Failed to encode private key for user %s: %v", username, err)
return err
}
var csrBytes []byte
if csrBytes, err = x509.CreateCertificateRequest(rand.Reader, x509csr, x509key); err != nil {
klog.Errorf("Failed to create CSR for user %s: %v", username, err)
return err
}
if err = pem.Encode(&csrBuffer, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}); err != nil {
klog.Errorf("Failed to encode CSR for user %s: %v", username, err)
return err
}
csr := csrBuffer.Bytes()
key := keyBuffer.Bytes()
csrName := fmt.Sprintf("%s-csr-%d", username, time.Now().Unix())
k8sCSR := &certificatesv1.CertificateSigningRequest{
TypeMeta: metav1.TypeMeta{
Kind: "CertificateSigningRequest",
APIVersion: "certificates.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: csrName,
Labels: map[string]string{constants.UsernameLabelKey: username},
Annotations: map[string]string{kubeconfig.PrivateKeyAnnotation: string(key)},
},
Spec: certificatesv1.CertificateSigningRequestSpec{
Request: csr,
SignerName: certificatesv1.KubeAPIServerClientSignerName,
Usages: []certificatesv1.KeyUsage{certificatesv1.UsageKeyEncipherment, certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature},
Username: username,
Groups: []string{user.AllAuthenticated},
},
}
if err = r.Create(ctx, k8sCSR); err != nil {
klog.Errorf("Failed to create CSR for user %s: %v", username, err)
return err
}
return nil
}

View File

@@ -0,0 +1,81 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package kubectl
import (
"context"
"time"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller"
)
const controllerName = "kubectl"
type Reconciler struct {
client.Client
resyncPeriod time.Duration
renewPeriod time.Duration
}
func (r *Reconciler) Name() string {
return controllerName
}
func (r *Reconciler) NeedLeaderElection() bool {
return true
}
func (r *Reconciler) SetupWithManager(mgr *controller.Manager) error {
r.Client = mgr.GetClient()
r.resyncPeriod = time.Minute
r.renewPeriod = time.Minute
return mgr.Add(r)
}
func (r *Reconciler) Start(ctx context.Context) error {
go wait.UntilWithContext(ctx, func(ctx context.Context) {
if err := r.reconcile(ctx); err != nil {
klog.Errorf("%s controller reconcile error: %s\n", controllerName, err.Error())
}
}, r.resyncPeriod)
return nil
}
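// reconcile force-deletes kubectl pods whose leases have not been renewed within renewPeriod.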
func (r *Reconciler) reconcile(ctx context.Context) error {
leases := &coordinationv1.LeaseList{}
if err := r.List(ctx, leases, client.MatchingLabels{constants.KubectlPodLabel: ""}); err != nil {
return err
}
// heartbeatTime is the oldest acceptable renew time; leases not renewed since then are considered expired
heartbeatTime := time.Now().Add(-r.renewPeriod)
for i := range leases.Items {
lease := &leases.Items[i]
if lease.Spec.RenewTime.After(heartbeatTime) {
continue
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: lease.Namespace,
Name: lease.Name,
},
}
if err := r.Delete(ctx, pod, client.GracePeriodSeconds(0)); err != nil && !errors.IsNotFound(err) {
klog.Errorf("deleting Pod %s/%s failed: %s, will retry", pod.Namespace, pod.Name, err.Error())
}
}
return nil
}

View File

@@ -0,0 +1,77 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package lease
import (
"context"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"kubesphere.io/kubesphere/pkg/constants"
)
type Operator struct {
client kubernetes.Interface
}
func NewOperator(client kubernetes.Interface) *Operator {
return &Operator{
client: client,
}
}
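// Create creates a lease owned by the given pod; if the lease already exists,
// its renew time is refreshed instead.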
func (o *Operator) Create(ctx context.Context, owner *corev1.Pod) error {
now := metav1.NowMicro()
lease := &coordinationv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Namespace: owner.Namespace,
Name: owner.Name,
Labels: map[string]string{
constants.KubectlPodLabel: "",
constants.KubeSphereManagedLabel: "true",
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "Pod",
UID: owner.GetUID(),
Name: owner.GetName(),
},
},
},
Spec: coordinationv1.LeaseSpec{
AcquireTime: &now,
RenewTime: &now,
},
}
if _, err := o.client.CoordinationV1().Leases(owner.Namespace).Create(ctx, lease, metav1.CreateOptions{}); err != nil {
if errors.IsAlreadyExists(err) {
return o.Renew(ctx, lease.Namespace, lease.Name)
}
return err
}
return nil
}
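// Renew updates the lease's renew time to now, retrying on conflicts.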
func (o *Operator) Renew(ctx context.Context, namespace, name string) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
lease, err := o.client.CoordinationV1().Leases(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return err
}
now := metav1.NowMicro()
lease.Spec.RenewTime = &now
if _, err = o.client.CoordinationV1().Leases(namespace).Update(ctx, lease, metav1.UpdateOptions{}); err != nil {
return err
}
return nil
})
}

View File

@@ -1,198 +1,168 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package loginrecord
import (
"context"
"fmt"
"sort"
"strings"
"time"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/controller/utils/controller"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
const (
// successSynced is used as part of the Event 'reason' when a LoginRecord is synced
successSynced = "Synced"
// messageResourceSynced is the message used for an Event fired when a LoginRecord is synced successfully
messageResourceSynced = "LoginRecord synced successfully"
controllerName = "loginrecord-controller"
)
const controllerName = "loginrecord"
type loginRecordController struct {
controller.BaseController
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
loginRecordLister iamv1alpha2listers.LoginRecordLister
userLister iamv1alpha2listers.UserLister
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
type Reconciler struct {
client.Client
recorder record.EventRecorder
loginHistoryRetentionPeriod time.Duration
loginHistoryMaximumEntries int
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
}
func NewLoginRecordController(k8sClient kubernetes.Interface,
ksClient kubesphere.Interface,
loginRecordInformer iamv1alpha2informers.LoginRecordInformer,
userInformer iamv1alpha2informers.UserInformer,
loginHistoryRetentionPeriod time.Duration,
loginHistoryMaximumEntries int) *loginRecordController {
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &loginRecordController{
BaseController: controller.BaseController{
Workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "LoginRecords"),
Synced: []cache.InformerSynced{loginRecordInformer.Informer().HasSynced, userInformer.Informer().HasSynced},
Name: controllerName,
},
k8sClient: k8sClient,
ksClient: ksClient,
loginRecordLister: loginRecordInformer.Lister(),
userLister: userInformer.Lister(),
loginHistoryRetentionPeriod: loginHistoryRetentionPeriod,
loginHistoryMaximumEntries: loginHistoryMaximumEntries,
recorder: recorder,
}
ctl.Handler = ctl.reconcile
klog.Info("Setting up event handlers")
loginRecordInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.Enqueue,
UpdateFunc: func(old, new interface{}) {
ctl.Enqueue(new)
},
DeleteFunc: ctl.Enqueue,
})
return ctl
func (r *Reconciler) Name() string {
return controllerName
}
func (c *loginRecordController) Start(ctx context.Context) error {
return c.Run(5, ctx.Done())
func (r *Reconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (c *loginRecordController) reconcile(key string) error {
loginRecord, err := c.loginRecordLister.Get(key)
if err != nil {
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("login record '%s' in work queue no longer exists", key))
return nil
}
klog.Error(err)
return err
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.loginHistoryRetentionPeriod = mgr.AuthenticationOptions.LoginHistoryRetentionPeriod
r.loginHistoryMaximumEntries = mgr.AuthenticationOptions.LoginHistoryMaximumEntries
r.recorder = mgr.GetEventRecorderFor(controllerName)
r.Client = mgr.GetClient()
return builder.
ControllerManagedBy(mgr).
For(
&iamv1beta1.LoginRecord{},
builder.WithPredicates(
predicate.ResourceVersionChangedPredicate{},
),
).
WithOptions(controller.Options{
MaxConcurrentReconciles: 2,
}).
Named(controllerName).
Complete(r)
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
loginRecord := &iamv1beta1.LoginRecord{}
if err := r.Get(ctx, req.NamespacedName, loginRecord); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !loginRecord.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is being deleted
// Our finalizer has finished, so the reconciler can do nothing.
return nil
return ctrl.Result{}, nil
}
user, err := c.userForLoginRecord(loginRecord)
user, err := r.userForLoginRecord(ctx, loginRecord)
if err != nil {
// delete orphan object
if errors.IsNotFound(err) {
return c.ksClient.IamV1alpha2().LoginRecords().Delete(context.TODO(), loginRecord.Name, metav1.DeleteOptions{})
if err = r.Delete(ctx, loginRecord); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
return err
return ctrl.Result{}, err
}
if err = c.updateUserLastLoginTime(user, loginRecord); err != nil {
return err
}
if err = c.shrinkEntriesFor(user); err != nil {
return err
if err = r.updateUserLastLoginTime(ctx, user, loginRecord); err != nil {
return ctrl.Result{}, err
}
result := ctrl.Result{}
now := time.Now()
// the login record is beyond the retention period
if loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Before(now) {
if err = c.ksClient.IamV1alpha2().LoginRecords().Delete(context.Background(), loginRecord.Name, *metav1.NewDeleteOptions(0)); err != nil {
klog.Error(err)
return err
if loginRecord.CreationTimestamp.Add(r.loginHistoryRetentionPeriod).Before(now) {
if err = r.Delete(ctx, loginRecord, client.GracePeriodSeconds(0)); err != nil {
return ctrl.Result{}, err
}
} else { // put item back into the queue
c.Workqueue.AddAfter(key, loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Sub(now))
result = ctrl.Result{
RequeueAfter: loginRecord.CreationTimestamp.Add(r.loginHistoryRetentionPeriod).Sub(now),
}
}
c.recorder.Event(loginRecord, corev1.EventTypeNormal, successSynced, messageResourceSynced)
return nil
if err = r.shrinkEntriesFor(ctx, user); err != nil {
return ctrl.Result{}, err
}
r.recorder.Event(loginRecord, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
return result, nil
}
// updateUserLastLoginTime accepts a login record and sets the user's lastLoginTime field
func (c *loginRecordController) updateUserLastLoginTime(user *iamv1alpha2.User, loginRecord *iamv1alpha2.LoginRecord) error {
func (r *Reconciler) updateUserLastLoginTime(ctx context.Context, user *iamv1beta1.User, loginRecord *iamv1beta1.LoginRecord) error {
// update lastLoginTime
if user.DeletionTimestamp.IsZero() &&
(user.Status.LastLoginTime == nil || user.Status.LastLoginTime.Before(&loginRecord.CreationTimestamp)) {
user.Status.LastLoginTime = &loginRecord.CreationTimestamp
_, err := c.ksClient.IamV1alpha2().Users().Update(context.Background(), user, metav1.UpdateOptions{})
return err
return r.Update(ctx, user)
}
return nil
}
// shrinkEntriesFor deletes the oldest login records that exceed the configured maximum number of entries
func (c *loginRecordController) shrinkEntriesFor(user *iamv1alpha2.User) error {
loginRecords, err := c.loginRecordLister.List(labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}))
if err != nil {
func (r *Reconciler) shrinkEntriesFor(ctx context.Context, user *iamv1beta1.User) error {
loginRecords := &iamv1beta1.LoginRecordList{}
if err := r.List(ctx, loginRecords, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(labels.Set{iamv1beta1.UserReferenceLabel: user.Name})}); err != nil {
return err
}
if len(loginRecords) <= c.loginHistoryMaximumEntries {
if len(loginRecords.Items) <= r.loginHistoryMaximumEntries {
return nil
}
sort.Slice(loginRecords, func(i, j int) bool {
return loginRecords[j].CreationTimestamp.After(loginRecords[i].CreationTimestamp.Time)
sort.Slice(loginRecords.Items, func(i, j int) bool {
return loginRecords.Items[j].CreationTimestamp.After(loginRecords.Items[i].CreationTimestamp.Time)
})
oldEntries := loginRecords[:len(loginRecords)-c.loginHistoryMaximumEntries]
for _, r := range oldEntries {
err = c.ksClient.IamV1alpha2().LoginRecords().Delete(context.TODO(), r.Name, metav1.DeleteOptions{})
if err != nil {
oldEntries := loginRecords.Items[:len(loginRecords.Items)-r.loginHistoryMaximumEntries]
for i := range oldEntries {
if err := r.Delete(ctx, &oldEntries[i]); err != nil {
return err
}
}
return nil
}
func (c *loginRecordController) userForLoginRecord(loginRecord *iamv1alpha2.LoginRecord) (*iamv1alpha2.User, error) {
username, ok := loginRecord.Labels[iamv1alpha2.UserReferenceLabel]
func (r *Reconciler) userForLoginRecord(ctx context.Context, loginRecord *iamv1beta1.LoginRecord) (*iamv1beta1.User, error) {
username, ok := loginRecord.Labels[iamv1beta1.UserReferenceLabel]
if !ok || len(username) == 0 {
klog.V(4).Info("login doesn't belong to any user")
return nil, errors.NewNotFound(iamv1alpha2.Resource(iamv1alpha2.ResourcesSingularUser), username)
return nil, errors.NewNotFound(iamv1beta1.Resource(iamv1beta1.ResourcesSingularUser), username)
}
return c.userLister.Get(username)
user := &iamv1beta1.User{}
if err := r.Get(ctx, client.ObjectKey{Name: username}, user); err != nil {
return nil, err
}
return user, nil
}

View File

@@ -1,38 +1,26 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package loginrecord
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakek8s "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
clienttesting "k8s.io/client-go/testing"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"kubesphere.io/kubesphere/pkg/apis"
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/scheme"
)
func TestLoginRecordController(t *testing.T) {
@@ -40,54 +28,43 @@ func TestLoginRecordController(t *testing.T) {
RunSpecs(t, "LoginRecord Controller Test Suite")
}
func newLoginRecord(username string) *iamv1alpha2.LoginRecord {
return &iamv1alpha2.LoginRecord{
func newLoginRecord(username string) *iamv1beta1.LoginRecord {
return &iamv1beta1.LoginRecord{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", username, rand.Intn(1000000)),
Labels: map[string]string{
iamv1alpha2.UserReferenceLabel: username,
iamv1beta1.UserReferenceLabel: username,
},
CreationTimestamp: metav1.Now(),
},
Spec: iamv1alpha2.LoginRecordSpec{
Type: iamv1alpha2.Token,
Spec: iamv1beta1.LoginRecordSpec{
Type: iamv1beta1.Token,
Provider: "",
Success: true,
Reason: iamv1alpha2.AuthenticatedSuccessfully,
Reason: iamv1beta1.AuthenticatedSuccessfully,
SourceIP: "",
UserAgent: "",
},
}
}
func newUser(username string) *iamv1alpha2.User {
return &iamv1alpha2.User{
func newUser(username string) *iamv1beta1.User {
return &iamv1beta1.User{
ObjectMeta: metav1.ObjectMeta{Name: username},
}
}
var _ = Describe("LoginRecord", func() {
var k8sClient *fakek8s.Clientset
var ksClient *fakeks.Clientset
var user *iamv1alpha2.User
var loginRecord *iamv1alpha2.LoginRecord
var controller *loginRecordController
var informers externalversions.SharedInformerFactory
var user *iamv1beta1.User
var loginRecord *iamv1beta1.LoginRecord
var reconciler *Reconciler
BeforeEach(func() {
user = newUser("admin")
loginRecord = newLoginRecord(user.Name)
k8sClient = fakek8s.NewSimpleClientset()
ksClient = fakeks.NewSimpleClientset(loginRecord, user)
informers = externalversions.NewSharedInformerFactory(ksClient, 0)
loginRecordInformer := informers.Iam().V1alpha2().LoginRecords()
userInformer := informers.Iam().V1alpha2().Users()
err := loginRecordInformer.Informer().GetIndexer().Add(loginRecord)
Expect(err).Should(BeNil())
err = userInformer.Informer().GetIndexer().Add(user)
Expect(err).Should(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
controller = NewLoginRecordController(k8sClient, ksClient, loginRecordInformer, userInformer, time.Hour, 1)
fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(user, loginRecord).Build()
reconciler = &Reconciler{}
reconciler.Client = fakeClient
reconciler.recorder = record.NewFakeRecorder(2)
})
// Add Tests for OpenAPI validation (or additional CRD features) specified in
@@ -96,20 +73,11 @@ var _ = Describe("LoginRecord", func() {
// test Kubernetes API server, which isn't the goal here.
Context("LoginRecord Controller", func() {
It("Should create successfully", func() {
By("Expecting to reconcile successfully")
err := controller.reconcile(loginRecord.Name)
_, err := reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{
Name: loginRecord.Name,
}})
Expect(err).Should(BeNil())
By("Expecting to update user last login time successfully")
err = controller.reconcile(loginRecord.Name)
Expect(err).Should(BeNil())
actions := ksClient.Actions()
Expect(len(actions)).Should(Equal(1))
newObject := user.DeepCopy()
newObject.Status.LastLoginTime = &loginRecord.CreationTimestamp
updateAction := clienttesting.NewUpdateAction(iamv1alpha2.SchemeGroupVersion.WithResource(iamv1alpha2.ResourcesPluralUser), "", newObject)
Expect(actions[0]).Should(Equal(updateAction))
})
})
})

98
pkg/controller/manager.go Normal file
View File

@@ -0,0 +1,98 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package controller
import (
"context"
"fmt"
"sort"
"github.com/Masterminds/semver/v3"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/manager"
"kubesphere.io/kubesphere/pkg/controller/options"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)
const (
SyncFailed = "SyncFailed"
Synced = "Synced"
MessageResourceSynced = "Synced successfully"
)
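// Controller is a named component that knows how to register itself with the Manager.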
type Controller interface {
Name() string
SetupWithManager(mgr *Manager) error
}
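// Hideable is implemented by controllers that should be excluded from the registry's key listing.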
type Hideable interface {
Hidden() bool
}
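// ClusterSelector is implemented by controllers that only run on certain cluster roles.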
type ClusterSelector interface {
Enabled(clusterRole string) bool
}
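// Manager wraps the controller-runtime manager together with KubeSphere-specific
// options and clients.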
type Manager struct {
options.Options
manager.Manager
K8sClient k8s.Client
IsControllerEnabled
ClusterClient clusterclient.Interface
K8sVersion *semver.Version
}
type IsControllerEnabled func(controllerName string) bool
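// Run sets up every enabled controller with the manager, skipping those not
// selected for the current cluster role, then starts the manager and blocks
// until the context is cancelled.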
func (mgr *Manager) Run(ctx context.Context, registry Registry) error {
for name, ctr := range registry {
if mgr.IsControllerEnabled(name) {
if clusterSelector, ok := ctr.(ClusterSelector); ok &&
!clusterSelector.Enabled(mgr.MultiClusterOptions.ClusterRole) {
klog.Infof("%s controller is enabled but is not going to run due to its dependent component being disabled.", name)
continue
}
if err := ctr.SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to setup %s controller: %v", name, err)
} else {
klog.Infof("%s controller is enabled and added successfully.", name)
}
} else {
klog.Infof("%s controller is disabled by controller selectors.", name)
}
}
klog.V(0).Info("Starting the controllers.")
if err := mgr.Manager.Start(ctx); err != nil {
return fmt.Errorf("unable to start the controller manager: %v", err)
}
return nil
}
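// Register adds a controller to the global registry, failing if the name is already taken.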
func Register(controller Controller) error {
if _, exist := Controllers[controller.Name()]; exist {
return fmt.Errorf("controller %s already exists", controller.Name())
}
Controllers[controller.Name()] = controller
return nil
}
var Controllers = make(Registry)
type Registry map[string]Controller
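// Keys returns the sorted names of the registered controllers, excluding hidden ones.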
func (r Registry) Keys() []string {
var keys []string
for k, v := range r {
if hidden, ok := v.(Hideable); ok && hidden.Hidden() {
continue
}
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package namespace
@@ -20,66 +9,57 @@ import (
"bytes"
"context"
"fmt"
"reflect"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
"kubesphere.io/api/tenant/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
tenantv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/constants"
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/scheme"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
const (
controllerName = "namespace-controller"
controllerName = "namespace"
finalizer = "finalizers.kubesphere.io/namespaces"
)
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
// Reconciler reconciles a Namespace object
type Reconciler struct {
client.Client
Logger logr.Logger
Recorder record.EventRecorder
MaxConcurrentReconciles int
GatewayOptions *gateway.Options
logger logr.Logger
recorder record.EventRecorder
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Client == nil {
r.Client = mgr.GetClient()
}
if r.Logger.GetSink() == nil {
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
}
if r.Recorder == nil {
r.Recorder = mgr.GetEventRecorderFor(controllerName)
}
if r.MaxConcurrentReconciles <= 0 {
r.MaxConcurrentReconciles = 1
}
func (r *Reconciler) Name() string {
return controllerName
}
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(controllerName)
r.recorder = mgr.GetEventRecorderFor(controllerName)
return ctrl.NewControllerManagedBy(mgr).
Named(controllerName).
WithOptions(controller.Options{
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
}).
WithOptions(controller.Options{MaxConcurrentReconciles: 2}).
For(&corev1.Namespace{}).
Complete(r)
}
@@ -89,45 +69,31 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=rolebases,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("namespace", req.NamespacedName)
rootCtx := context.Background()
logger := r.logger.WithValues("namespace", req.NamespacedName)
ctx = klog.NewContext(ctx, logger)
namespace := &corev1.Namespace{}
if err := r.Get(rootCtx, req.NamespacedName, namespace); err != nil {
if err := r.Get(ctx, req.NamespacedName, namespace); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// name of your custom finalizer
finalizer := "finalizers.kubesphere.io/namespaces"
if namespace.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !sliceutil.HasString(namespace.ObjectMeta.Finalizers, finalizer) {
// create only once, ignore already exists error
if err := r.initCreatorRoleBinding(rootCtx, logger, namespace); err != nil {
return ctrl.Result{}, err
}
namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, finalizer)
if namespace.Labels == nil {
namespace.Labels = make(map[string]string)
}
// used for NetworkPolicyPeer.NamespaceSelector
namespace.Labels[constants.NamespaceLabelKey] = namespace.Name
if err := r.Update(rootCtx, namespace); err != nil {
if !controllerutil.ContainsFinalizer(namespace, finalizer) {
if err := r.initCreatorRoleBinding(ctx, namespace); err != nil {
return ctrl.Result{}, err
}
updated := namespace.DeepCopy()
controllerutil.AddFinalizer(updated, finalizer)
return ctrl.Result{}, r.Patch(ctx, updated, client.MergeFrom(namespace))
}
} else {
// The object is being deleted
if sliceutil.HasString(namespace.ObjectMeta.Finalizers, finalizer) {
if err := r.deleteGateway(rootCtx, logger, namespace.Name); err != nil {
return ctrl.Result{}, err
}
// remove our finalizer from the list and update it.
namespace.ObjectMeta.Finalizers = sliceutil.RemoveString(namespace.ObjectMeta.Finalizers, func(item string) bool {
return item == finalizer
})
if err := r.Update(rootCtx, namespace); err != nil {
if controllerutil.ContainsFinalizer(namespace, finalizer) {
controllerutil.RemoveFinalizer(namespace, finalizer)
if err := r.Update(ctx, namespace); err != nil {
return ctrl.Result{}, err
}
}
@@ -135,197 +101,133 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
return ctrl.Result{}, nil
}
// Bind to workspace if the namespace created by kubesphere
_, hasWorkspaceLabel := namespace.Labels[tenantv1alpha1.WorkspaceLabel]
// if the namespace doesn't have a label like kubefed.io/managed: "true" (single cluster environment)
// or it has a label like kubefed.io/managed: "false"(multi-cluster environment), we set the owner reference filed.
// Otherwise, kubefed controller will remove owner reference.
kubefedManaged := namespace.Labels[constants.KubefedManagedLabel] == "true"
if !kubefedManaged {
if hasWorkspaceLabel {
if err := r.bindWorkspace(rootCtx, logger, namespace); err != nil {
return ctrl.Result{}, err
}
} else {
if err := r.unbindWorkspace(rootCtx, logger, namespace); err != nil {
return ctrl.Result{}, err
}
}
}
// Initialize roles for devops/project namespaces if created by kubesphere
_, hasDevOpsProjectLabel := namespace.Labels[constants.DevOpsProjectLabelKey]
if hasDevOpsProjectLabel || hasWorkspaceLabel {
if err := r.initRoles(rootCtx, logger, namespace); err != nil {
return ctrl.Result{}, err
}
if err := r.initRoles(ctx, namespace); err != nil {
return ctrl.Result{}, err
}
r.Recorder.Event(namespace, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
if err := r.reconcileWorkspaceOwnerReference(ctx, namespace); err != nil {
return ctrl.Result{}, err
}
r.recorder.Event(namespace, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
return ctrl.Result{}, nil
}
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
workspace := &tenantv1alpha1.Workspace{}
if err := r.Get(ctx, types.NamespacedName{Name: namespace.Labels[constants.WorkspaceLabelKey]}, workspace); err != nil {
// remove existed owner reference if workspace not found
if errors.IsNotFound(err) && k8sutil.IsControlledBy(namespace.OwnerReferences, tenantv1alpha1.ResourceKindWorkspace, "") {
return r.unbindWorkspace(ctx, logger, namespace)
func (r *Reconciler) reconcileWorkspaceOwnerReference(ctx context.Context, namespace *corev1.Namespace) error {
workspaceName, hasWorkspaceLabel := namespace.Labels[v1beta1.WorkspaceLabel]
if !hasWorkspaceLabel {
if k8sutil.IsControlledBy(namespace.OwnerReferences, v1beta1.ResourceKindWorkspace, workspaceName) {
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
return r.Update(ctx, namespace)
}
// nothing to do
return nil
}
workspace := &v1beta1.Workspace{}
if err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, workspace); err != nil {
owner := metav1.GetControllerOf(namespace)
if errors.IsNotFound(err) && owner != nil && owner.Kind == v1beta1.ResourceKindWorkspace {
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
return r.Update(ctx, namespace)
}
// skip if workspace not found
return client.IgnoreNotFound(err)
}
// workspace has been deleted
if !workspace.ObjectMeta.DeletionTimestamp.IsZero() {
return r.unbindWorkspace(ctx, logger, namespace)
return nil
}
// the owner reference does not match the workspace label
if !metav1.IsControlledBy(namespace, workspace) {
namespace := namespace.DeepCopy()
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
namespace = namespace.DeepCopy()
if err := controllerutil.SetControllerReference(workspace, namespace, scheme.Scheme); err != nil {
logger.Error(err, "set controller reference failed")
return err
}
logger.V(4).Info("update namespace owner reference", "workspace", workspace.Name)
if err := r.Update(ctx, namespace); err != nil {
logger.Error(err, "update namespace failed")
return err
}
}
return nil
}
func (r *Reconciler) unbindWorkspace(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
_, hasWorkspaceLabel := namespace.Labels[tenantv1alpha1.WorkspaceLabel]
if hasWorkspaceLabel || k8sutil.IsControlledBy(namespace.OwnerReferences, tenantv1alpha1.ResourceKindWorkspace, "") {
ns := namespace.DeepCopy()
wsName := k8sutil.GetWorkspaceOwnerName(ns.OwnerReferences)
if hasWorkspaceLabel {
wsName = namespace.Labels[tenantv1alpha1.WorkspaceLabel]
}
delete(ns.Labels, constants.WorkspaceLabelKey)
ns.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(ns.OwnerReferences)
logger.V(4).Info("remove owner reference and label", "namespace", ns.Name, "workspace", wsName)
if err := r.Update(ctx, ns); err != nil {
logger.Error(err, "update owner reference failed")
return err
}
}
return nil
}
// delete gateway
func (r *Reconciler) deleteGateway(ctx context.Context, logger logr.Logger, namespace string) error {
gatewayName := constants.IngressControllerPrefix + namespace
if r.GatewayOptions.Namespace != "" {
namespace = r.GatewayOptions.Namespace
}
gateway := unstructured.Unstructured{}
gateway.SetGroupVersionKind(schema.GroupVersionKind{Group: "gateway.kubesphere.io", Version: "v1alpha1", Kind: "Gateway"})
gateway.SetName(gatewayName)
gateway.SetNamespace(namespace)
logger.V(4).Info("deleting gateway", "namespace", namespace, "name", gatewayName)
err := r.Delete(ctx, &gateway)
if err != nil {
return client.IgnoreNotFound(err)
}
return nil
}
func (r *Reconciler) initRoles(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
var templates iamv1alpha2.RoleBaseList
var labelKey string
// filtering initial roles by label
if namespace.Labels[constants.DevOpsProjectLabelKey] != "" {
// scope.kubesphere.io/devops: ""
labelKey = fmt.Sprintf(iamv1alpha2.ScopeLabelFormat, iamv1alpha2.ScopeDevOps)
} else {
// scope.kubesphere.io/namespace: ""
labelKey = fmt.Sprintf(iamv1alpha2.ScopeLabelFormat, iamv1alpha2.ScopeNamespace)
func (r *Reconciler) initRoles(ctx context.Context, namespace *corev1.Namespace) error {
if _, ok := namespace.Labels[constants.WorkspaceLabelKey]; !ok {
return nil
}
if err := r.List(ctx, &templates, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(labels.Set{labelKey: ""})}); err != nil {
logger.Error(err, "list role bases failed")
logger := klog.FromContext(ctx)
var templates iamv1beta1.BuiltinRoleList
matchingLabels := client.MatchingLabels{iamv1beta1.ScopeLabel: iamv1beta1.ScopeNamespace}
if err := r.List(ctx, &templates, matchingLabels); err != nil {
return err
}
for _, template := range templates.Items {
var role rbacv1.Role
if err := yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(template.Role.Raw), 1024).Decode(&role); err == nil && role.Kind == iamv1alpha2.ResourceKindRole {
var old rbacv1.Role
if err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, &old); err != nil {
if errors.IsNotFound(err) {
role.Namespace = namespace.Name
logger.V(4).Info("init builtin role", "role", role.Name)
if err := r.Client.Create(ctx, &role); err != nil {
logger.Error(err, "create role failed")
return err
}
continue
}
}
if !reflect.DeepEqual(role.Labels, old.Labels) ||
!reflect.DeepEqual(role.Annotations, old.Annotations) ||
!reflect.DeepEqual(role.Rules, old.Rules) {
old.Labels = role.Labels
old.Annotations = role.Annotations
old.Rules = role.Rules
logger.V(4).Info("update builtin role", "role", role.Name)
if err := r.Update(ctx, &old); err != nil {
logger.Error(err, "update role failed")
return err
}
selector, err := metav1.LabelSelectorAsSelector(&template.TargetSelector)
if err != nil {
logger.V(4).Error(err, "failed to pares target selector", "template", template.Name)
continue
}
if !selector.Matches(labels.Set(namespace.Labels)) {
continue
}
var builtinRoleTemplate iamv1beta1.Role
if err := yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(template.Role.Raw), 1024).Decode(&builtinRoleTemplate); err == nil &&
builtinRoleTemplate.Kind == iamv1beta1.ResourceKindRole {
existingRole := &iamv1beta1.Role{ObjectMeta: metav1.ObjectMeta{Name: builtinRoleTemplate.Name, Namespace: namespace.Name}}
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, existingRole, func() error {
existingRole.Labels = builtinRoleTemplate.Labels
existingRole.Annotations = builtinRoleTemplate.Annotations
existingRole.AggregationRoleTemplates = builtinRoleTemplate.AggregationRoleTemplates
existingRole.Rules = builtinRoleTemplate.Rules
return nil
})
if err != nil {
return err
}
logger.V(4).Info("builtin role successfully initialized", "operation", op)
} else if err != nil {
logger.Error(fmt.Errorf("invalid role base found"), "init roles failed", "name", template.Name)
logger.Error(err, "invalid builtin role found", "name", template.Name)
}
}
return nil
}
func (r *Reconciler) initCreatorRoleBinding(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
func (r *Reconciler) initCreatorRoleBinding(ctx context.Context, namespace *corev1.Namespace) error {
creator := namespace.Annotations[constants.CreatorAnnotationKey]
if creator == "" {
return nil
}
var user iamv1alpha2.User
if err := r.Get(ctx, types.NamespacedName{Name: creator}, &user); err != nil {
return client.IgnoreNotFound(err)
}
creatorRoleBinding := newCreatorRoleBinding(creator, namespace.Name)
logger.V(4).Info("init creator role binding", "creator", user.Name)
if err := r.Client.Create(ctx, creatorRoleBinding); err != nil {
if errors.IsAlreadyExists(err) {
return nil
}
logger.Error(err, "create role binding failed")
return err
}
return nil
}
func newCreatorRoleBinding(creator string, namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
roleBinding := &iamv1beta1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", creator, iamv1alpha2.NamespaceAdmin),
Labels: map[string]string{iamv1alpha2.UserReferenceLabel: creator},
Namespace: namespace,
Name: fmt.Sprintf("%s-%s", creator, iamv1beta1.NamespaceAdmin),
Namespace: namespace.Name,
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: iamv1alpha2.ResourceKindRole,
Name: iamv1alpha2.NamespaceAdmin,
},
Subjects: []rbacv1.Subject{
}
op, err := controllerutil.CreateOrUpdate(ctx, r.Client, roleBinding, func() error {
roleBinding.Labels = map[string]string{
iamv1beta1.UserReferenceLabel: creator,
iamv1beta1.RoleReferenceLabel: iamv1beta1.NamespaceAdmin,
}
roleBinding.RoleRef = rbacv1.RoleRef{
APIGroup: iamv1beta1.GroupName,
Kind: iamv1beta1.ResourceKindRole,
Name: iamv1beta1.NamespaceAdmin,
}
roleBinding.Subjects = []rbacv1.Subject{
{
Name: creator,
Kind: iamv1alpha2.ResourceKindUser,
APIGroup: rbacv1.GroupName,
Kind: iamv1beta1.ResourceKindUser,
APIGroup: iamv1beta1.GroupName,
},
},
}
return nil
})
if err != nil {
return err
}
klog.FromContext(ctx).V(4).Info("creator role binding successfully initialized", "operation", op)
return nil
}

View File

@@ -1,39 +1,30 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package namespace
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/onsi/gomega/gexec"
"k8s.io/client-go/kubernetes/scheme"
"kubesphere.io/kubesphere/pkg/controller/controllertest"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"golang.org/x/net/context"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"kubesphere.io/kubesphere/pkg/scheme"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
@@ -42,13 +33,15 @@ import (
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
func TestNamespaceController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Namespace Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
var _ = BeforeSuite(func() {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
@@ -58,8 +51,10 @@ var _ = BeforeSuite(func(done Done) {
UseExistingCluster: &t,
}
} else {
crdDirPaths, err := controllertest.LoadCrdPath()
Expect(err).ToNot(HaveOccurred())
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "ks-core", "crds")},
CRDDirectoryPaths: crdDirPaths,
AttachControlPlaneOutput: false,
}
}
@@ -68,32 +63,33 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
MetricsBindAddress: "0",
Scheme: scheme.Scheme,
Metrics: metricsserver.Options{
BindAddress: "0",
},
})
Expect(err).ToNot(HaveOccurred())
err = (&Reconciler{}).SetupWithManager(k8sManager)
err = (&Reconciler{}).SetupWithManager(&kscontroller.Manager{Manager: k8sManager})
Expect(err).ToNot(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
err = k8sManager.Start(ctx)
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
gexec.KillAndWait(5 * time.Second)
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
return testEnv.Stop()
}, 30*time.Second, 5*time.Second).ShouldNot(HaveOccurred())
})

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package namespace
@@ -20,13 +9,12 @@ import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
tenantv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"kubesphere.io/kubesphere/pkg/constants"
)
@@ -36,7 +24,7 @@ var _ = Describe("Namespace", func() {
const timeout = time.Second * 30
const interval = time.Second * 1
workspace := &tenantv1alpha1.Workspace{
workspace := &tenantv1beta1.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-workspace",
},
@@ -55,7 +43,7 @@ var _ = Describe("Namespace", func() {
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "test-namespace",
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
Labels: map[string]string{tenantv1beta1.WorkspaceLabel: workspace.Name},
},
}

View File

@@ -1,11 +0,0 @@
approvers:
- zheng1
- zryfish
reviewers:
- zheng1
- zryfish
labels:
- area/controller
- area/networking

View File

@@ -1,21 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +kubebuilder:rbac:groups=network.kubesphere.io,resources=namespacenetworkpolicies,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups:core,resource=namespaces,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups:core,resource=services,verbs=get;list;watch;create;update;patch
package network

View File

@@ -1,584 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ippool
import (
"context"
"fmt"
"reflect"
"time"
cnet "github.com/projectcalico/calico/libcalico-go/lib/net"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
k8sinformers "k8s.io/client-go/informers"
coreinfomers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
networkv1alpha1 "kubesphere.io/api/network/v1alpha1"
tenantv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
networkInformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
tenantv1alpha1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/utils"
"kubesphere.io/kubesphere/pkg/controller/network/webhooks"
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
)
var (
ErrCIDROverlap = fmt.Errorf("CIDR is overlap")
)
type IPPoolController struct {
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
provider ippool.Provider
ippoolInformer networkInformer.IPPoolInformer
ippoolSynced cache.InformerSynced
ippoolQueue workqueue.RateLimitingInterface
wsInformer tenantv1alpha1informers.WorkspaceInformer
wsSynced cache.InformerSynced
nsInformer coreinfomers.NamespaceInformer
nsSynced cache.InformerSynced
nsQueue workqueue.RateLimitingInterface
ipamblockInformer networkInformer.IPAMBlockInformer
ipamblockSynced cache.InformerSynced
client clientset.Interface
kubesphereClient kubesphereclient.Interface
}
func (c *IPPoolController) enqueueIPPools(obj interface{}) {
pool, ok := obj.(*networkv1alpha1.IPPool)
if !ok {
utilruntime.HandleError(fmt.Errorf("IPPool informer returned non-ippool object: %#v", obj))
return
}
c.ippoolQueue.Add(pool.Name)
}
func (c *IPPoolController) addFinalizer(pool *networkv1alpha1.IPPool) error {
clone := pool.DeepCopy()
controllerutil.AddFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
if clone.Labels == nil {
clone.Labels = make(map[string]string)
}
clone.Labels[networkv1alpha1.IPPoolNameLabel] = clone.Name
clone.Labels[networkv1alpha1.IPPoolTypeLabel] = clone.Spec.Type
clone.Labels[networkv1alpha1.IPPoolIDLabel] = fmt.Sprintf("%d", clone.ID())
pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error adding finalizer to pool %s: %v", pool.Name, err)
return err
}
klog.V(3).Infof("Added finalizer to pool %s", pool.Name)
return nil
}
func (c *IPPoolController) removeFinalizer(pool *networkv1alpha1.IPPool) error {
clone := pool.DeepCopy()
controllerutil.RemoveFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
if err != nil {
klog.V(3).Infof("Error removing finalizer from pool %s: %v", pool.Name, err)
return err
}
klog.V(3).Infof("Removed protection finalizer from pool %s", pool.Name)
return nil
}
func (c *IPPoolController) ValidateCreate(obj runtime.Object) error {
b := obj.(*networkv1alpha1.IPPool)
ip, cidr, err := cnet.ParseCIDR(b.Spec.CIDR)
if err != nil {
return fmt.Errorf("invalid cidr")
}
size, _ := cidr.Mask.Size()
if ip.IP.To4() != nil && size == 32 {
return fmt.Errorf("the cidr mask must be less than 32")
}
if b.Spec.BlockSize > 0 && b.Spec.BlockSize < size {
return fmt.Errorf("the blocksize should be larger than the cidr mask")
}
if b.Spec.RangeStart != "" || b.Spec.RangeEnd != "" {
iStart := cnet.ParseIP(b.Spec.RangeStart)
iEnd := cnet.ParseIP(b.Spec.RangeEnd)
if iStart == nil || iEnd == nil {
return fmt.Errorf("invalid rangeStart or rangeEnd")
}
offsetStart, err := b.IPToOrdinal(*iStart)
if err != nil {
return err
}
offsetEnd, err := b.IPToOrdinal(*iEnd)
if err != nil {
return err
}
if offsetEnd < offsetStart {
return fmt.Errorf("rangeStart should not big than rangeEnd")
}
}
pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(context.TODO(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", b.ID()),
}).String(),
})
if err != nil {
return err
}
for _, p := range pools.Items {
if b.Overlapped(p) {
return fmt.Errorf("ippool cidr is overlapped with %s", p.Name)
}
}
return nil
}
func (c *IPPoolController) validateDefaultIPPool(p *networkv1alpha1.IPPool) error {
pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(context.TODO(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(
labels.Set{
networkv1alpha1.IPPoolDefaultLabel: "",
}).String(),
})
if err != nil {
return err
}
poolLen := len(pools.Items)
if poolLen != 1 || pools.Items[0].Name != p.Name {
return nil
}
return fmt.Errorf("Must ensure that there is at least one default ippool")
}
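// ValidateUpdate rejects modifications to the immutable fields (cidr, type,
// blockSize, rangeStart/rangeEnd) and, when the default label is being
// removed, ensures another default pool remains.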
func (c *IPPoolController) ValidateUpdate(old runtime.Object, new runtime.Object) error {
oldP := old.(*networkv1alpha1.IPPool)
newP := new.(*networkv1alpha1.IPPool)
if newP.Spec.CIDR != oldP.Spec.CIDR {
return fmt.Errorf("cidr cannot be modified")
}
if newP.Spec.Type != oldP.Spec.Type {
return fmt.Errorf("ippool type cannot be modified")
}
if newP.Spec.BlockSize != oldP.Spec.BlockSize {
return fmt.Errorf("ippool blockSize cannot be modified")
}
if newP.Spec.RangeEnd != oldP.Spec.RangeEnd || newP.Spec.RangeStart != oldP.Spec.RangeStart {
return fmt.Errorf("ippool rangeEnd/rangeStart cannot be modified")
}
_, defaultOld := oldP.Labels[networkv1alpha1.IPPoolDefaultLabel]
_, defaultNew := newP.Labels[networkv1alpha1.IPPoolDefaultLabel]
if !defaultNew && defaultOld != defaultNew {
err := c.validateDefaultIPPool(newP)
if err != nil {
return err
}
}
return nil
}
func (c *IPPoolController) ValidateDelete(obj runtime.Object) error {
p := obj.(*networkv1alpha1.IPPool)
if p.Status.Allocations > 0 {
return fmt.Errorf("ippool is in use, please remove the workload before deleting")
}
return c.validateDefaultIPPool(p)
}
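// disableIPPool sets spec.disabled on the pool (if not already set) so that
// no new addresses are allocated from it while it is being deleted.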
func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
if old.Spec.Disabled {
return nil
}
clone := old.DeepCopy()
clone.Spec.Disabled = true
_, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
return err
}
func (c *IPPoolController) updateIPPoolStatus(old *networkv1alpha1.IPPool) error {
stats, err := c.provider.GetIPPoolStats(old)
if err != nil {
return fmt.Errorf("failed to get ippool %s status: %v", old.Name, err)
}
if reflect.DeepEqual(old.Status, stats.Status) {
return nil
}
_, err = c.kubesphereClient.NetworkV1alpha1().IPPools().UpdateStatus(context.TODO(), stats, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update ippool %s status: %v", old.Name, err)
}
return nil
}
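// processIPPool reconciles a single IPPool: pools marked for deletion are
// disabled and their finalizer is removed once the provider reports no
// remaining allocations (otherwise a retry delay is returned); new pools get
// a finalizer and are created in the provider; existing pools are updated.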
func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
klog.V(4).Infof("Processing IPPool %s", name)
startTime := time.Now()
defer func() {
klog.V(4).Infof("Finished processing IPPool %s (%v)", name, time.Since(startTime))
}()
pool, err := c.ippoolInformer.Lister().Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to get ippool %s: %v", name, err)
}
if pool.Type() != c.provider.Type() {
klog.V(4).Infof("pool %s type not match, ignored", pool.Name)
return nil, nil
}
if utils.IsDeletionCandidate(pool, networkv1alpha1.IPPoolFinalizer) {
err = c.disableIPPool(pool)
if err != nil {
return nil, err
}
// Pool should be deleted. Check if it's used and remove finalizer if
// it's not.
canDelete, err := c.provider.DeleteIPPool(pool)
if err != nil {
return nil, err
}
if canDelete {
return nil, c.removeFinalizer(pool)
}
// The ippool is still in use; update its status and retry later.
delay := time.Second * 3
return &delay, c.updateIPPoolStatus(pool)
}
if utils.NeedToAddFinalizer(pool, networkv1alpha1.IPPoolFinalizer) {
err = c.addFinalizer(pool)
if err != nil {
return nil, err
}
err = c.provider.CreateIPPool(pool)
if err != nil {
klog.V(4).Infof("Provider failed to create IPPool %s, err=%v", pool.Name, err)
return nil, err
}
return nil, c.updateIPPoolStatus(pool)
}
err = c.provider.UpdateIPPool(pool)
if err != nil {
klog.V(4).Infof("Provider failed to update IPPool %s, err=%v", pool.Name, err)
return nil, err
}
return nil, c.updateIPPoolStatus(pool)
}
func (c *IPPoolController) Start(ctx context.Context) error {
go c.provider.SyncStatus(ctx.Done(), c.ippoolQueue)
return c.Run(5, ctx.Done())
}
func (c *IPPoolController) Run(workers int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.ippoolQueue.ShutDown()
klog.Info("starting ippool controller")
defer klog.Info("shutting down ippool controller")
if !cache.WaitForCacheSync(stopCh, c.ippoolSynced, c.ipamblockSynced, c.wsSynced, c.nsSynced) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < workers; i++ {
go wait.Until(c.runIPPoolWorker, time.Second, stopCh)
go wait.Until(c.runNSWorker, time.Second, stopCh)
}
<-stopCh
return nil
}
func (c *IPPoolController) runIPPoolWorker() {
for c.processIPPoolItem() {
}
}
func (c *IPPoolController) processIPPoolItem() bool {
key, quit := c.ippoolQueue.Get()
if quit {
return false
}
defer c.ippoolQueue.Done(key)
delay, err := c.processIPPool(key.(string))
if err == nil {
c.ippoolQueue.Forget(key)
return true
}
if delay != nil {
c.ippoolQueue.AddAfter(key, *delay)
} else {
c.ippoolQueue.AddRateLimited(key)
}
utilruntime.HandleError(fmt.Errorf("error processing ippool %v (will retry): %v", key, err))
return true
}
func (c *IPPoolController) runNSWorker() {
for c.processNSItem() {
}
}
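// processNS keeps a namespace's pool bindings in sync: namespaces that
// belong to a workspace are bound to all synced default ippools, all other
// namespaces to none.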
func (c *IPPoolController) processNS(name string) error {
ns, err := c.nsInformer.Lister().Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return err
}
var poolsName []string
if ns.Labels != nil && ns.Labels[constants.WorkspaceLabelKey] != "" {
pools, err := c.ippoolInformer.Lister().List(labels.SelectorFromSet(labels.Set{
networkv1alpha1.IPPoolDefaultLabel: "",
}))
if err != nil {
return err
}
for _, pool := range pools {
if pool.Status.Synced {
poolsName = append(poolsName, pool.Name)
}
}
}
clone := ns.DeepCopy()
err = c.provider.UpdateNamespace(clone, poolsName)
if err != nil {
return err
}
if reflect.DeepEqual(clone, ns) {
return nil
}
_, err = c.client.CoreV1().Namespaces().Update(context.TODO(), clone, metav1.UpdateOptions{})
return err
}
func (c *IPPoolController) processNSItem() bool {
key, quit := c.nsQueue.Get()
if quit {
return false
}
defer c.nsQueue.Done(key)
err := c.processNS(key.(string))
if err == nil {
c.nsQueue.Forget(key)
return true
}
c.nsQueue.AddRateLimited(key)
utilruntime.HandleError(fmt.Errorf("error processing ns %v (will retry): %v", key, err))
return true
}
func (c *IPPoolController) enqueueIPAMBlocks(obj interface{}) {
block, ok := obj.(*networkv1alpha1.IPAMBlock)
if !ok {
return
}
poolName := block.Labels[networkv1alpha1.IPPoolNameLabel]
c.ippoolQueue.Add(poolName)
}
func (c *IPPoolController) enqueueWorkspace(obj interface{}) {
wk, ok := obj.(*tenantv1alpha1.Workspace)
if !ok {
return
}
pools, err := c.ippoolInformer.Lister().List(labels.SelectorFromSet(labels.Set{
constants.WorkspaceLabelKey: wk.Name,
}))
if err != nil {
klog.Errorf("failed to list ippools by worksapce %s, err=%v", wk.Name, err)
}
for _, pool := range pools {
c.ippoolQueue.Add(pool.Name)
}
}
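// enqueueNamespace requeues a namespace when its workspace label changes;
// old may be nil, in which case the namespace is enqueued whenever it
// carries a workspace label.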
func (c *IPPoolController) enqueueNamespace(old interface{}, new interface{}) {
workspaceOld := ""
if old != nil {
nsOld := old.(*corev1.Namespace)
if nsOld.Labels != nil {
workspaceOld = nsOld.Labels[constants.WorkspaceLabelKey]
}
}
nsNew := new.(*corev1.Namespace)
workspaceNew := ""
if nsNew.Labels != nil {
workspaceNew = nsNew.Labels[constants.WorkspaceLabelKey]
}
if workspaceOld != workspaceNew {
c.nsQueue.Add(nsNew.Name)
}
}
func NewIPPoolController(
kubesphereInformers ksinformers.SharedInformerFactory,
kubernetesInformers k8sinformers.SharedInformerFactory,
client clientset.Interface,
kubesphereClient kubesphereclient.Interface,
provider ippool.Provider) *IPPoolController {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(func(format string, args ...interface{}) {
klog.Infof(format, args...)
})
broadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "ippool-controller"})
c := &IPPoolController{
eventBroadcaster: broadcaster,
eventRecorder: recorder,
ippoolQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool"),
nsQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool-ns"),
client: client,
kubesphereClient: kubesphereClient,
provider: provider,
}
c.ippoolInformer = kubesphereInformers.Network().V1alpha1().IPPools()
c.ippoolSynced = c.ippoolInformer.Informer().HasSynced
c.ipamblockInformer = kubesphereInformers.Network().V1alpha1().IPAMBlocks()
c.ipamblockSynced = c.ipamblockInformer.Informer().HasSynced
c.wsInformer = kubesphereInformers.Tenant().V1alpha1().Workspaces()
c.wsSynced = c.wsInformer.Informer().HasSynced
c.nsInformer = kubernetesInformers.Core().V1().Namespaces()
c.nsSynced = c.nsInformer.Informer().HasSynced
c.ippoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.enqueueIPPools,
UpdateFunc: func(old, new interface{}) {
_, defaultOld := old.(*networkv1alpha1.IPPool).Labels[networkv1alpha1.IPPoolDefaultLabel]
_, defaultNew := new.(*networkv1alpha1.IPPool).Labels[networkv1alpha1.IPPoolDefaultLabel]
if defaultOld != defaultNew {
nss, err := c.nsInformer.Lister().List(labels.Everything())
if err != nil {
return
}
for _, ns := range nss {
c.enqueueNamespace(nil, ns)
}
}
c.enqueueIPPools(new)
},
DeleteFunc: func(obj interface{}) {
// Deleted objects may arrive as tombstones; ignore anything
// that is not a plain IPPool.
pool, ok := obj.(*networkv1alpha1.IPPool)
if !ok {
return
}
if _, isDefault := pool.Labels[networkv1alpha1.IPPoolDefaultLabel]; isDefault {
nss, err := c.nsInformer.Lister().List(labels.Everything())
if err != nil {
return
}
for _, ns := range nss {
c.enqueueNamespace(nil, ns)
}
}
},
})
// The ipamblock handlers only trigger ippool status updates.
c.ipamblockInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.enqueueIPAMBlocks,
UpdateFunc: func(old, new interface{}) {
c.enqueueIPAMBlocks(new)
},
DeleteFunc: c.enqueueIPAMBlocks,
})
c.wsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.enqueueWorkspace,
})
c.nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(new interface{}) {
c.enqueueNamespace(nil, new)
},
UpdateFunc: c.enqueueNamespace,
})
// Register the ippool validating webhook and the pod defaulting webhook.
webhooks.RegisterValidator(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindIPPool).String(),
&webhooks.ValidatorWrap{Obj: &networkv1alpha1.IPPool{}, Helper: c})
webhooks.RegisterDefaulter(corev1.SchemeGroupVersion.WithKind("Pod").String(),
&webhooks.DefaulterWrap{Obj: &corev1.Pod{}, Helper: provider})
return c
}


@@ -1,156 +0,0 @@
/*
Copyright 2020 The KubeSphere authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ippool
import (
"context"
"flag"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2"
"kubesphere.io/api/network/v1alpha1"
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/controller/network/utils"
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool/ipam"
)
func TestIPPoolSuit(t *testing.T) {
klog.InitFlags(nil)
flag.Set("logtostderr", "true")
flag.Set("v", "4")
flag.Parse()
klog.SetOutput(GinkgoWriter)
RegisterFailHandler(Fail)
RunSpecs(t, "IPPool Suite")
}
var _ = Describe("test ippool", func() {
pool := &v1alpha1.IPPool{
TypeMeta: v1.TypeMeta{},
ObjectMeta: v1.ObjectMeta{
Name: "testippool",
},
Spec: v1alpha1.IPPoolSpec{
Type: v1alpha1.VLAN,
CIDR: "192.168.0.0/24",
},
Status: v1alpha1.IPPoolStatus{},
}
ksclient := ksfake.NewSimpleClientset()
k8sClient := k8sfake.NewSimpleClientset()
ksInformer := ksinformers.NewSharedInformerFactory(ksclient, 0)
k8sInformer := k8sinformers.NewSharedInformerFactory(k8sClient, 0)
p := ippool.NewProvider(k8sInformer, ksclient, k8sClient, v1alpha1.IPPoolTypeLocal, nil)
ipamClient := ipam.NewIPAMClient(ksclient, v1alpha1.VLAN)
c := NewIPPoolController(ksInformer, k8sInformer, k8sClient, ksclient, p)
stopCh := make(chan struct{})
go ksInformer.Start(stopCh)
go k8sInformer.Start(stopCh)
go c.Start(context.Background())
It("test create ippool", func() {
clone := pool.DeepCopy()
clone.Spec.CIDR = "testxxx"
Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
clone = pool.DeepCopy()
clone.Spec.CIDR = "192.168.0.0/24"
clone.Spec.RangeStart = "192.168.0.100"
clone.Spec.RangeEnd = "192.168.0.99"
Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
clone = pool.DeepCopy()
clone.Spec.CIDR = "192.168.0.0/24"
clone.Spec.RangeStart = "192.168.3.100"
clone.Spec.RangeEnd = "192.168.3.111"
Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
clone = pool.DeepCopy()
clone.Spec.CIDR = "192.168.0.0/24"
clone.Spec.BlockSize = 23
Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
clone = pool.DeepCopy()
_, err := ksclient.NetworkV1alpha1().IPPools().Create(context.TODO(), clone, v1.CreateOptions{})
Expect(err).ShouldNot(HaveOccurred())
Eventually(func() bool {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
if len(result.Labels) != 3 {
return false
}
if utils.NeedToAddFinalizer(result, v1alpha1.IPPoolFinalizer) {
return false
}
return true
}, 3*time.Second).Should(Equal(true))
clone = pool.DeepCopy()
Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
})
It("test update ippool", func() {
old, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
new := old.DeepCopy()
new.Spec.CIDR = "192.168.1.0/24"
Expect(c.ValidateUpdate(old, new)).Should(HaveOccurred())
})
It("test ippool stats", func() {
ipamClient.AutoAssign(ipam.AutoAssignArgs{
HandleID: "testhandle",
Attrs: nil,
Pool: "testippool",
})
Eventually(func() bool {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
return result.Status.Allocations == 1
}, 3*time.Second).Should(Equal(true))
})
It("test delete pool", func() {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
Expect(c.ValidateDelete(result)).Should(HaveOccurred())
ipamClient.ReleaseByHandle("testhandle")
Eventually(func() bool {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
return result.Status.Allocations == 0
}, 3*time.Second).Should(Equal(true))
err := ksclient.NetworkV1alpha1().IPPools().Delete(context.TODO(), pool.Name, v1.DeleteOptions{})
Expect(err).ShouldNot(HaveOccurred())
blocks, _ := ksclient.NetworkV1alpha1().IPAMBlocks().List(context.TODO(), v1.ListOptions{})
Expect(len(blocks.Items)).Should(Equal(0))
})
})


@@ -1,724 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nsnetworkpolicy
import (
"context"
"fmt"
"net"
"sort"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
uruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
v1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"kubesphere.io/api/network/v1alpha1"
workspacev1alpha1 "kubesphere.io/api/tenant/v1alpha1"
ksnetclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
nspolicy "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
workspace "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider"
options "kubesphere.io/kubesphere/pkg/simple/client/network"
)
const (
//TODO: use a set to track the service-to-NSNP mapping;
//for now, service labels in NSNPs are synced periodically.
defaultSleepDuration = 60 * time.Second
defaultThread = 5
defaultSync = "5m"
//whether network isolation is enabled in the namespace
NamespaceNPAnnotationKey = "kubesphere.io/network-isolate"
NamespaceNPAnnotationEnabled = "enabled"
NodeNSNPAnnotationKey = "kubesphere.io/snat-node-ips"
AnnotationNPNAME = v1alpha1.NSNPPrefix + "network-isolate"
//TODO: configure it
DNSLocalIP = "169.254.25.10"
DNSPort = 53
DNSNamespace = "kube-system"
DNSServiceName = "kube-dns"
DNSServiceCoreDNS = "coredns"
)
// NSNetworkPolicyController implements the Controller interface for managing KubeSphere network policies,
// converting them to k8s NetworkPolicies and syncing them to the provider.
type NSNetworkPolicyController struct {
client kubernetes.Interface
ksclient ksnetclient.NetworkV1alpha1Interface
informer nspolicy.NamespaceNetworkPolicyInformer
informerSynced cache.InformerSynced
//TODO: support services in the same namespace
serviceInformer v1.ServiceInformer
serviceInformerSynced cache.InformerSynced
nodeInformer v1.NodeInformer
nodeInformerSynced cache.InformerSynced
workspaceInformer workspace.WorkspaceInformer
workspaceInformerSynced cache.InformerSynced
namespaceInformer v1.NamespaceInformer
namespaceInformerSynced cache.InformerSynced
provider provider.NsNetworkPolicyProvider
options options.NSNPOptions
nsQueue workqueue.RateLimitingInterface
nsnpQueue workqueue.RateLimitingInterface
}
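// stringToCIDR converts a single IP address into CIDR notation, e.g.
// "10.0.0.1" becomes "10.0.0.1/32" and "fe80::1" becomes "fe80::1/128".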
func stringToCIDR(ipStr string) (string, error) {
cidr := ""
if ip := net.ParseIP(ipStr); ip != nil {
if ip.To4() != nil {
cidr = ipStr + "/32"
} else {
cidr = ipStr + "/128"
}
} else {
return cidr, fmt.Errorf("ip string %s parse error\n", ipStr)
}
return cidr, nil
}
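// generateDNSRule builds an egress rule that allows DNS traffic (TCP and
// UDP on port 53) to each of the given name server IPs.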
func generateDNSRule(nameServers []string) (netv1.NetworkPolicyEgressRule, error) {
var rule netv1.NetworkPolicyEgressRule
for _, nameServer := range nameServers {
cidr, err := stringToCIDR(nameServer)
if err != nil {
return rule, err
}
rule.To = append(rule.To, netv1.NetworkPolicyPeer{
IPBlock: &netv1.IPBlock{
CIDR: cidr,
},
})
}
protocolTCP := corev1.ProtocolTCP
protocolUDP := corev1.ProtocolUDP
dnsPort := intstr.FromInt(DNSPort)
rule.Ports = append(rule.Ports, netv1.NetworkPolicyPort{
Protocol: &protocolTCP,
Port: &dnsPort,
}, netv1.NetworkPolicyPort{
Protocol: &protocolUDP,
Port: &dnsPort,
})
return rule, nil
}
func (c *NSNetworkPolicyController) generateDNSServiceRule() (netv1.NetworkPolicyEgressRule, error) {
peer, ports, err := c.handlerPeerService(DNSNamespace, DNSServiceName, false)
if err != nil {
peer, ports, err = c.handlerPeerService(DNSNamespace, DNSServiceCoreDNS, false)
}
return netv1.NetworkPolicyEgressRule{
Ports: ports,
To: []netv1.NetworkPolicyPeer{peer},
}, err
}
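// handlerPeerService translates a service reference into a NetworkPolicyPeer
// selecting the service's backing pods in its namespace; for egress
// (ingress=false) it also restricts traffic to the service's exposed ports.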
func (c *NSNetworkPolicyController) handlerPeerService(namespace string, name string, ingress bool) (netv1.NetworkPolicyPeer, []netv1.NetworkPolicyPort, error) {
peerNP := netv1.NetworkPolicyPeer{}
var ports []netv1.NetworkPolicyPort
service, err := c.serviceInformer.Lister().Services(namespace).Get(name)
if err != nil {
return peerNP, nil, err
}
peerNP.PodSelector = new(metav1.LabelSelector)
peerNP.NamespaceSelector = new(metav1.LabelSelector)
if len(service.Spec.Selector) <= 0 {
return peerNP, nil, fmt.Errorf("service %s/%s has no podselect", namespace, name)
}
peerNP.PodSelector.MatchLabels = make(map[string]string)
for key, value := range service.Spec.Selector {
peerNP.PodSelector.MatchLabels[key] = value
}
peerNP.NamespaceSelector.MatchLabels = make(map[string]string)
peerNP.NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = namespace
//only allow traffic to the service's exposed ports
if !ingress {
ports = make([]netv1.NetworkPolicyPort, 0)
for _, port := range service.Spec.Ports {
protocol := port.Protocol
portIntString := intstr.FromInt(int(port.Port))
ports = append(ports, netv1.NetworkPolicyPort{
Protocol: &protocol,
Port: &portIntString,
})
}
}
return peerNP, ports, err
}
func (c *NSNetworkPolicyController) convertPeer(peer v1alpha1.NetworkPolicyPeer, ingress bool) (netv1.NetworkPolicyPeer, []netv1.NetworkPolicyPort, error) {
peerNP := netv1.NetworkPolicyPeer{}
var ports []netv1.NetworkPolicyPort
if peer.ServiceSelector != nil {
namespace := peer.ServiceSelector.Namespace
name := peer.ServiceSelector.Name
return c.handlerPeerService(namespace, name, ingress)
} else if peer.NamespaceSelector != nil {
name := peer.NamespaceSelector.Name
peerNP.NamespaceSelector = new(metav1.LabelSelector)
peerNP.NamespaceSelector.MatchLabels = make(map[string]string)
peerNP.NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = name
} else if peer.IPBlock != nil {
peerNP.IPBlock = peer.IPBlock
} else {
klog.Errorf("Invalid nsnp peer %v\n", peer)
return peerNP, nil, fmt.Errorf("Invalid nsnp peer %v\n", peer)
}
return peerNP, ports, nil
}
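// convertToK8sNP converts a KubeSphere NamespaceNetworkPolicy into a native
// k8s NetworkPolicy; service peers that carry their own port list are
// emitted as separate rules so the port restriction applies only to them.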
func (c *NSNetworkPolicyController) convertToK8sNP(n *v1alpha1.NamespaceNetworkPolicy) (*netv1.NetworkPolicy, error) {
np := &netv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: v1alpha1.NSNPPrefix + n.Name,
Namespace: n.Namespace,
},
Spec: netv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: make([]netv1.PolicyType, 0),
},
}
if n.Spec.Egress != nil {
np.Spec.Egress = make([]netv1.NetworkPolicyEgressRule, 0)
for _, egress := range n.Spec.Egress {
tmpRule := netv1.NetworkPolicyEgressRule{}
for _, peer := range egress.To {
peer, ports, err := c.convertPeer(peer, false)
if err != nil {
return nil, err
}
if ports != nil {
np.Spec.Egress = append(np.Spec.Egress, netv1.NetworkPolicyEgressRule{
Ports: ports,
To: []netv1.NetworkPolicyPeer{peer},
})
continue
}
tmpRule.To = append(tmpRule.To, peer)
}
tmpRule.Ports = egress.Ports
if tmpRule.To == nil {
continue
}
np.Spec.Egress = append(np.Spec.Egress, tmpRule)
}
np.Spec.PolicyTypes = append(np.Spec.PolicyTypes, netv1.PolicyTypeEgress)
}
if n.Spec.Ingress != nil {
np.Spec.Ingress = make([]netv1.NetworkPolicyIngressRule, 0)
for _, ingress := range n.Spec.Ingress {
tmpRule := netv1.NetworkPolicyIngressRule{}
for _, peer := range ingress.From {
peer, ports, err := c.convertPeer(peer, true)
if err != nil {
return nil, err
}
if ports != nil {
np.Spec.Ingress = append(np.Spec.Ingress, netv1.NetworkPolicyIngressRule{
Ports: ports,
From: []netv1.NetworkPolicyPeer{peer},
})
continue
}
tmpRule.From = append(tmpRule.From, peer)
}
tmpRule.Ports = ingress.Ports
if tmpRule.From == nil {
continue
}
np.Spec.Ingress = append(np.Spec.Ingress, tmpRule)
}
np.Spec.PolicyTypes = append(np.Spec.PolicyTypes, netv1.PolicyTypeIngress)
}
return np, nil
}
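// generateNodeRule builds an ingress rule that allows traffic from every
// SNAT IP advertised through the node annotation NodeNSNPAnnotationKey.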
func (c *NSNetworkPolicyController) generateNodeRule() (netv1.NetworkPolicyIngressRule, error) {
var (
rule netv1.NetworkPolicyIngressRule
ips []string
)
nodes, err := c.nodeInformer.Lister().List(labels.Everything())
if err != nil {
return rule, err
}
for _, node := range nodes {
snatIPs := node.Annotations[NodeNSNPAnnotationKey]
if snatIPs != "" {
ips = append(ips, strings.Split(snatIPs, ";")...)
}
}
sort.Strings(ips)
for _, ip := range ips {
cidr, err := stringToCIDR(ip)
if err != nil {
continue
}
rule.From = append(rule.From, netv1.NetworkPolicyPeer{
IPBlock: &netv1.IPBlock{
CIDR: cidr,
},
})
}
return rule, nil
}
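// generateNSNP builds the namespace isolation policy: ingress is allowed
// either from the whole workspace (matchWorkspace=true) or only from the
// namespace itself, plus any configured always-allowed ingress namespaces.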
func (c *NSNetworkPolicyController) generateNSNP(workspace string, namespace string, matchWorkspace bool) *netv1.NetworkPolicy {
policy := &netv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: AnnotationNPNAME,
Namespace: namespace,
},
Spec: netv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: make([]netv1.PolicyType, 0),
Ingress: []netv1.NetworkPolicyIngressRule{{
From: []netv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{},
},
}},
}},
},
}
policy.Spec.PolicyTypes = append(policy.Spec.PolicyTypes, netv1.PolicyTypeIngress)
if matchWorkspace {
policy.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[constants.WorkspaceLabelKey] = workspace
} else {
policy.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = namespace
}
for _, allowedIngressNamespace := range c.options.AllowedIngressNamespaces {
defaultAllowedIngress := netv1.NetworkPolicyPeer{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
constants.NamespaceLabelKey: allowedIngressNamespace,
},
},
}
policy.Spec.Ingress[0].From = append(policy.Spec.Ingress[0].From, defaultAllowedIngress)
}
return policy
}
func (c *NSNetworkPolicyController) nsEnqueue(ns *corev1.Namespace) {
key, err := cache.MetaNamespaceKeyFunc(ns)
if err != nil {
uruntime.HandleError(fmt.Errorf("Get namespace key %s failed", ns.Name))
return
}
workspaceName := ns.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
return
}
c.nsQueue.Add(key)
}
func (c *NSNetworkPolicyController) addWorkspace(newObj interface{}) {
new := newObj.(*workspacev1alpha1.Workspace)
label := labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: new.Name})
nsList, err := c.namespaceInformer.Lister().List(label)
if err != nil {
klog.Errorf("Error while list namespace by label %s", label.String())
return
}
for _, ns := range nsList {
c.nsEnqueue(ns)
}
}
func (c *NSNetworkPolicyController) addNode(newObj interface{}) {
nsList, err := c.namespaceInformer.Lister().List(labels.Everything())
if err != nil {
klog.Errorf("Error while list namespace by label")
return
}
for _, ns := range nsList {
c.nsEnqueue(ns)
}
}
func (c *NSNetworkPolicyController) addNamespace(obj interface{}) {
ns := obj.(*corev1.Namespace)
workspaceName := ns.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
return
}
c.nsEnqueue(ns)
}
func namespaceNetworkIsolateEnabled(ns *corev1.Namespace) bool {
if ns.Annotations != nil && ns.Annotations[NamespaceNPAnnotationKey] == NamespaceNPAnnotationEnabled {
return true
}
return false
}
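// syncNs reconciles the isolation policy of a namespace according to its
// own annotation and the workspace's networkIsolation flag, adding DNS and
// node ingress exceptions where needed.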
func (c *NSNetworkPolicyController) syncNs(key string) error {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.Errorf("Not a valid controller key %s, %#v", key, err)
return err
}
ns, err := c.namespaceInformer.Lister().Get(name)
if err != nil {
// not found, possibly been deleted
if errors.IsNotFound(err) {
klog.V(2).Infof("Namespace %v has been deleted", key)
return nil
}
return err
}
workspaceName := ns.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
return nil
}
wksp, err := c.workspaceInformer.Lister().Get(workspaceName)
if err != nil {
//Should not happen: the referenced workspace no longer exists
if errors.IsNotFound(err) {
klog.V(2).Infof("Workspace %v has been deleted", workspaceName)
return nil
}
return err
}
matchWorkspace := false
deletePolicy := false
nsnpList, err := c.informer.Lister().NamespaceNetworkPolicies(ns.Name).List(labels.Everything())
if namespaceNetworkIsolateEnabled(ns) {
matchWorkspace = false
} else if workspaceNetworkIsolationEnabled(wksp) {
matchWorkspace = true
} else {
deletePolicy = true
}
if deletePolicy || matchWorkspace {
//delete all namespace NPs when network isolation is not active
if err == nil && len(nsnpList) > 0 {
if c.ksclient.NamespaceNetworkPolicies(ns.Name).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}) != nil {
klog.Errorf("Error deleting all nsnps in namespace %s", ns.Name)
}
}
}
policy := c.generateNSNP(workspaceName, ns.Name, matchWorkspace)
if shouldAddDNSRule(nsnpList) {
ruleDNS, err := generateDNSRule([]string{DNSLocalIP})
if err != nil {
return err
}
policy.Spec.Egress = append(policy.Spec.Egress, ruleDNS)
ruleDNSService, err := c.generateDNSServiceRule()
if err == nil {
policy.Spec.Egress = append(policy.Spec.Egress, ruleDNSService)
} else {
klog.Warningf("Cannot handle service %s or %s", DNSServiceName, DNSServiceCoreDNS)
}
policy.Spec.PolicyTypes = append(policy.Spec.PolicyTypes, netv1.PolicyTypeEgress)
}
ruleNode, err := c.generateNodeRule()
if err != nil {
return err
}
if len(ruleNode.From) > 0 {
policy.Spec.Ingress = append(policy.Spec.Ingress, ruleNode)
}
if deletePolicy {
c.provider.Delete(c.provider.GetKey(AnnotationNPNAME, ns.Name))
} else {
err = c.provider.Set(policy)
if err != nil {
klog.Errorf("Error while converting %#v to provider's network policy.", policy)
return err
}
}
return nil
}
func shouldAddDNSRule(nsnpList []*v1alpha1.NamespaceNetworkPolicy) bool {
for _, nsnp := range nsnpList {
if len(nsnp.Spec.Egress) > 0 {
return true
}
}
return false
}
func (c *NSNetworkPolicyController) nsWorker() {
for c.processNsWorkItem() {
}
}
func (c *NSNetworkPolicyController) processNsWorkItem() bool {
key, quit := c.nsQueue.Get()
if quit {
return false
}
defer c.nsQueue.Done(key)
if err := c.syncNs(key.(string)); err != nil {
klog.Errorf("Error when syncns %s", err)
}
return true
}
func (c *NSNetworkPolicyController) nsnpEnqueue(obj interface{}) {
nsnp := obj.(*v1alpha1.NamespaceNetworkPolicy)
key, err := cache.MetaNamespaceKeyFunc(nsnp)
if err != nil {
uruntime.HandleError(fmt.Errorf("get namespace network policy key %s failed", nsnp.Name))
return
}
c.nsnpQueue.Add(key)
}
func (c *NSNetworkPolicyController) syncNSNP(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.Errorf("Not a valid controller key %s, %#v", key, err)
return err
}
c.nsQueue.Add(namespace)
nsnp, err := c.informer.Lister().NamespaceNetworkPolicies(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
klog.V(4).Infof("NSNP %v has been deleted", key)
c.provider.Delete(c.provider.GetKey(v1alpha1.NSNPPrefix+name, namespace))
return nil
}
return err
}
np, err := c.convertToK8sNP(nsnp)
if err != nil {
klog.Errorf("Error while convert nsnp to k8snp: %s", err)
return err
}
err = c.provider.Set(np)
if err != nil {
klog.Errorf("Error while set provider: %s", err)
return err
}
return nil
}
func (c *NSNetworkPolicyController) nsNPWorker() {
for c.processNSNPWorkItem() {
}
}
func (c *NSNetworkPolicyController) processNSNPWorkItem() bool {
key, quit := c.nsnpQueue.Get()
if quit {
return false
}
defer c.nsnpQueue.Done(key)
if err := c.syncNSNP(key.(string)); err != nil {
klog.Errorf("Error syncing nsnp %s: %v", key, err)
}
return true
}
func workspaceNetworkIsolationEnabled(wksp *workspacev1alpha1.Workspace) bool {
if wksp.Spec.NetworkIsolation != nil && *wksp.Spec.NetworkIsolation {
return true
}
return false
}
// NewNSNetworkPolicyController returns a controller which manages NSNP objects.
func NewNSNetworkPolicyController(
client kubernetes.Interface,
ksclient ksnetclient.NetworkV1alpha1Interface,
nsnpInformer nspolicy.NamespaceNetworkPolicyInformer,
serviceInformer v1.ServiceInformer,
nodeInformer v1.NodeInformer,
workspaceInformer workspace.WorkspaceInformer,
namespaceInformer v1.NamespaceInformer,
policyProvider provider.NsNetworkPolicyProvider,
options options.NSNPOptions) *NSNetworkPolicyController {
controller := &NSNetworkPolicyController{
client: client,
ksclient: ksclient,
informer: nsnpInformer,
informerSynced: nsnpInformer.Informer().HasSynced,
serviceInformer: serviceInformer,
serviceInformerSynced: serviceInformer.Informer().HasSynced,
nodeInformer: nodeInformer,
nodeInformerSynced: nodeInformer.Informer().HasSynced,
workspaceInformer: workspaceInformer,
workspaceInformerSynced: workspaceInformer.Informer().HasSynced,
namespaceInformer: namespaceInformer,
namespaceInformerSynced: namespaceInformer.Informer().HasSynced,
provider: policyProvider,
nsQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
nsnpQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespacenp"),
options: options,
}
workspaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.addWorkspace,
UpdateFunc: func(oldObj, newObj interface{}) {
old := oldObj.(*workspacev1alpha1.Workspace)
new := newObj.(*workspacev1alpha1.Workspace)
if workspaceNetworkIsolationEnabled(old) == workspaceNetworkIsolationEnabled(new) {
return
}
controller.addWorkspace(newObj)
},
})
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.addNode,
UpdateFunc: func(oldObj, newObj interface{}) {
old := oldObj.(*corev1.Node)
new := newObj.(*corev1.Node)
if old.Annotations[NodeNSNPAnnotationKey] == new.Annotations[NodeNSNPAnnotationKey] {
return
}
controller.addNode(newObj)
},
})
namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.addNamespace,
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
old := oldObj.(*corev1.Namespace)
new := newObj.(*corev1.Namespace)
if old.Annotations[NamespaceNPAnnotationKey] == new.Annotations[NamespaceNPAnnotationKey] {
return
}
controller.addNamespace(newObj)
},
})
nsnpInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: controller.nsnpEnqueue,
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
controller.nsnpEnqueue(newObj)
},
DeleteFunc: controller.nsnpEnqueue,
}, defaultSleepDuration)
return controller
}
func (c *NSNetworkPolicyController) Start(ctx context.Context) error {
return c.Run(defaultThread, defaultSync, ctx.Done())
}
// Run starts the controller.
func (c *NSNetworkPolicyController) Run(threadiness int, reconcilerPeriod string, stopCh <-chan struct{}) error {
defer uruntime.HandleCrash()
defer c.nsQueue.ShutDown()
defer c.nsnpQueue.ShutDown()
klog.Info("Waiting to sync with Kubernetes API (NSNP)")
if ok := cache.WaitForCacheSync(stopCh, c.informerSynced, c.serviceInformerSynced, c.workspaceInformerSynced, c.namespaceInformerSynced, c.nodeInformerSynced); !ok {
return fmt.Errorf("Failed to wait for caches to sync")
}
klog.Info("Finished syncing with Kubernetes API (NSNP)")
// Start a number of worker threads to read from the queue. Each worker
// will pull keys off the resource cache event queue and sync them to the
// K8s datastore.
for i := 0; i < threadiness; i++ {
go wait.Until(c.nsWorker, time.Second, stopCh)
go wait.Until(c.nsNPWorker, time.Second, stopCh)
}
//Worker to sync K8s NetworkPolicies
go c.provider.Start(stopCh)
klog.Info("NSNP controller is now running")
<-stopCh
klog.Info("Stopping NSNP controller")
return nil
}


@@ -1,36 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nsnetworkpolicy
import (
"flag"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog/v2"
)
func TestNsnetworkpolicy(t *testing.T) {
klog.InitFlags(nil)
flag.Set("logtostderr", "true")
flag.Set("v", "4")
flag.Parse()
klog.SetOutput(GinkgoWriter)
RegisterFailHandler(Fail)
RunSpecs(t, "Nsnetworkpolicy Suite")
}


@@ -1,373 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nsnetworkpolicy
import (
"context"
"fmt"
"reflect"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/yaml"
kubeinformers "k8s.io/client-go/informers"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2"
netv1alpha1 "kubesphere.io/api/network/v1alpha1"
wkspv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider"
options "kubesphere.io/kubesphere/pkg/simple/client/network"
)
var (
c *NSNetworkPolicyController
stopCh chan struct{}
alwaysReady = func() bool { return true }
)
const (
workspaceNP = `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: networkisolate
namespace: %s
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
%s: %s
- namespaceSelector:
matchLabels:
"kubesphere.io/namespace" : "kubesphere-monitoring-system"
policyTypes:
- Ingress`
serviceTmp = `
apiVersion: v1
kind: Service
metadata:
name: myservice
namespace: testns
spec:
clusterIP: 10.0.0.1
selector:
app: mylbapp
ports:
- protocol: TCP
port: 80
targetPort: 9376
`
workspaceTmp = `
apiVersion: tenant.kubesphere.io/v1alpha1
kind: Workspace
metadata:
annotations:
kubesphere.io/creator: admin
name: testworkspace
spec:
manager: admin
networkIsolation: true
status: {}
`
nsTmp = `
apiVersion: v1
kind: Namespace
metadata:
labels:
kubesphere.io/workspace: testworkspace
name: testns
spec:
finalizers:
- kubernetes
`
)
func StringToObject(data string, obj interface{}) error {
reader := strings.NewReader(data)
return yaml.NewYAMLOrJSONDecoder(reader, 10).Decode(obj)
}
var _ = Describe("Nsnetworkpolicy", func() {
BeforeEach(func() {
stopCh = make(chan struct{})
calicoProvider := provider.NewFakeNetworkProvider()
kubeClient := kubefake.NewSimpleClientset()
ksClient := ksfake.NewSimpleClientset()
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
ksInformer := ksinformers.NewSharedInformerFactory(ksClient, 0)
nsnpInformer := ksInformer.Network().V1alpha1().NamespaceNetworkPolicies()
serviceInformer := kubeInformer.Core().V1().Services()
nodeInformer := kubeInformer.Core().V1().Nodes()
workspaceInformer := ksInformer.Tenant().V1alpha1().Workspaces()
namespaceInformer := kubeInformer.Core().V1().Namespaces()
nsnpOptions := options.NewNetworkOptions()
nsnpOptions.NSNPOptions.AllowedIngressNamespaces = append(nsnpOptions.NSNPOptions.AllowedIngressNamespaces, "kubesphere-monitoring-system")
c = NewNSNetworkPolicyController(kubeClient, ksClient.NetworkV1alpha1(),
nsnpInformer, serviceInformer, nodeInformer,
workspaceInformer, namespaceInformer, calicoProvider, nsnpOptions.NSNPOptions)
serviceObj := &corev1.Service{}
Expect(StringToObject(serviceTmp, serviceObj)).ShouldNot(HaveOccurred())
Expect(serviceInformer.Informer().GetIndexer().Add(serviceObj)).ShouldNot(HaveOccurred())
nsObj := &corev1.Namespace{}
Expect(StringToObject(nsTmp, nsObj)).ShouldNot(HaveOccurred())
namespaceInformer.Informer().GetIndexer().Add(nsObj)
workspaceObj := &wkspv1alpha1.Workspace{}
Expect(StringToObject(workspaceTmp, workspaceObj)).ShouldNot(HaveOccurred())
workspaceInformer.Informer().GetIndexer().Add(workspaceObj)
c.namespaceInformerSynced = alwaysReady
c.serviceInformerSynced = alwaysReady
c.workspaceInformerSynced = alwaysReady
c.informerSynced = alwaysReady
go c.Start(context.Background())
})
It("test func namespaceNetworkIsolateEnabled", func() {
ns := &corev1.Namespace{}
Expect(namespaceNetworkIsolateEnabled(ns)).To(BeFalse())
ns.Annotations = make(map[string]string)
Expect(namespaceNetworkIsolateEnabled(ns)).To(BeFalse())
ns.Annotations[NamespaceNPAnnotationKey] = NamespaceNPAnnotationEnabled
Expect(namespaceNetworkIsolateEnabled(ns)).To(BeTrue())
})
It("test func workspaceNetworkIsolationEnabled", func() {
value := false
wksp := &wkspv1alpha1.Workspace{}
Expect(workspaceNetworkIsolationEnabled(wksp)).To(BeFalse())
wksp.Spec.NetworkIsolation = &value
Expect(workspaceNetworkIsolationEnabled(wksp)).To(BeFalse())
value = true
Expect(workspaceNetworkIsolationEnabled(wksp)).To(BeTrue())
})
It("Should create ns networkisolate np correctly in workspace", func() {
objSrt := fmt.Sprintf(workspaceNP, "testns", constants.WorkspaceLabelKey, "testworkspace")
obj := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy := c.generateNSNP("testworkspace", "testns", true)
Expect(reflect.DeepEqual(obj.Spec, policy.Spec)).To(BeTrue())
})
It("Should create ns networkisolate np correctly in ns", func() {
objSrt := fmt.Sprintf(workspaceNP, "testns", constants.NamespaceLabelKey, "testns")
obj := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy := c.generateNSNP("testworkspace", "testns", false)
Expect(reflect.DeepEqual(obj.Spec, policy.Spec)).To(BeTrue())
})
It("test func convertToK8sNP", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: namespaceIPblockNP
namespace: testns
spec:
ingress:
- from:
- ipBlock:
cidr: 172.0.0.1/16
ports:
- protocol: TCP
port: 80
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy, err := c.convertToK8sNP(obj)
objSrt = `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: IPblockNP
namespace: testns
spec:
ingress:
- from:
- ipBlock:
cidr: 172.0.0.1/16
ports:
- protocol: TCP
port: 80
policyTypes:
- Ingress
`
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
Expect(err).ShouldNot(HaveOccurred())
Expect(reflect.DeepEqual(obj2.Spec, policy.Spec)).To(BeTrue())
})
It("test func convertToK8sNP with namespace", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: testnamespace
namespace: testns2
spec:
ingress:
- from:
- namespace:
name: testns
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
np, err := c.convertToK8sNP(obj)
Expect(err).ShouldNot(HaveOccurred())
objTmp := `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: testnamespace
namespace: testns2
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
%s: %s
policyTypes:
- Ingress`
objSrt = fmt.Sprintf(objTmp, constants.NamespaceLabelKey, "testns")
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
Expect(reflect.DeepEqual(np.Spec, obj2.Spec)).To(BeTrue())
})
It("test func convertToK8sNP with service ingress", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: testnamespace
namespace: testns2
spec:
ingress:
- from:
- service:
name: myservice
namespace: testns
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
np, err := c.convertToK8sNP(obj)
Expect(err).ShouldNot(HaveOccurred())
objSrt = `
apiVersion: "networking.k8s.io/v1"
kind: NetworkPolicy
metadata:
name: networkisolate
namespace: testns
spec:
podSelector: {}
ingress:
- from:
- podSelector:
matchLabels:
app: mylbapp
namespaceSelector:
matchLabels:
kubesphere.io/namespace: testns
policyTypes:
- Ingress
`
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
klog.Errorf("\n%v\n%v\n", np.Spec, obj2.Spec)
Expect(reflect.DeepEqual(np.Spec, obj2.Spec)).To(BeTrue())
})
It("test func convertToK8sNP with service egress", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: testnamespace
namespace: testns2
spec:
egress:
- To:
- service:
name: myservice
namespace: testns
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
np, err := c.convertToK8sNP(obj)
Expect(err).ShouldNot(HaveOccurred())
objSrt = `
apiVersion: "networking.k8s.io/v1"
kind: NetworkPolicy
metadata:
name: networkisolate
namespace: testns
spec:
podSelector: {}
egress:
- to:
- podSelector:
matchLabels:
app: mylbapp
namespaceSelector:
matchLabels:
kubesphere.io/namespace: testns
ports:
- protocol: TCP
port: 80
policyTypes:
- Egress
`
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
klog.Errorf("\n%v\n%v\n", np.Spec, obj2.Spec)
Expect(reflect.DeepEqual(np.Spec, obj2.Spec)).To(BeTrue())
})
AfterEach(func() {
close(stopCh)
})
})


@@ -1,65 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"fmt"
api "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/calico/kube-controllers/pkg/converter"
constants "github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion"
v1 "k8s.io/api/networking/v1"
)
func NewFakeNetworkProvider() *FakeNetworkProvider {
f := new(FakeNetworkProvider)
f.NSNPData = make(map[string]*api.NetworkPolicy)
f.policyConverter = converter.NewPolicyConverter()
return f
}
type FakeNetworkProvider struct {
NSNPData map[string]*api.NetworkPolicy
policyConverter converter.Converter
}
func (f *FakeNetworkProvider) Delete(key string) {
delete(f.NSNPData, key)
}
func (f *FakeNetworkProvider) Start(stopCh <-chan struct{}) {
}
func (f *FakeNetworkProvider) Set(np *v1.NetworkPolicy) error {
policy, err := f.policyConverter.Convert(np)
if err != nil {
return err
}
// Add to cache.
k := f.policyConverter.GetKey(policy)
tmp := policy.(api.NetworkPolicy)
f.NSNPData[k] = &tmp
return nil
}
func (f *FakeNetworkProvider) GetKey(name, nsname string) string {
policyName := constants.K8sNetworkPolicyNamePrefix + name
return fmt.Sprintf("%s/%s", nsname, policyName)
}


@@ -1,27 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import netv1 "k8s.io/api/networking/v1"
// NsNetworkPolicyProvider is an interface that lets different CNIs implement our API
type NsNetworkPolicyProvider interface {
Delete(key string)
Set(policy *netv1.NetworkPolicy) error
Start(stopCh <-chan struct{})
GetKey(name, nsname string) string
}


@@ -1,269 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"fmt"
"reflect"
"strings"
"sync"
"time"
rcache "github.com/projectcalico/calico/kube-controllers/pkg/cache"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
uruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informerv1 "k8s.io/client-go/informers/networking/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"kubesphere.io/api/network/v1alpha1"
)
const (
defaultSyncTime = 1 * time.Minute
)
func (c *k8sPolicyController) GetKey(name, nsname string) string {
return fmt.Sprintf("%s/%s", nsname, name)
}
func getkey(key string) (string, string) {
strs := strings.Split(key, "/")
return strs[0], strs[1]
}
// policyController implements the Controller interface for managing Kubernetes network policies
// and syncing them to the k8s datastore as NetworkPolicies.
type k8sPolicyController struct {
client kubernetes.Interface
informer informerv1.NetworkPolicyInformer
ctx context.Context
resourceCache rcache.ResourceCache
hasSynced cache.InformerSynced
}
func (c *k8sPolicyController) Start(stopCh <-chan struct{}) {
c.run(5, "5m", stopCh)
}
func (c *k8sPolicyController) Set(np *netv1.NetworkPolicy) error {
// Add to cache.
k := c.GetKey(np.Name, np.Namespace)
c.resourceCache.Set(k, *np)
return nil
}
func (c *k8sPolicyController) Delete(key string) {
c.resourceCache.Delete(key)
}
// Run starts the controller.
func (c *k8sPolicyController) run(threadiness int, reconcilerPeriod string, stopCh <-chan struct{}) {
defer uruntime.HandleCrash()
// Let the workers stop when we are done
workqueue := c.resourceCache.GetQueue()
defer workqueue.ShutDown()
// Wait until we are in sync with the Kubernetes API before starting the
// resource cache.
klog.Info("Waiting to sync with Kubernetes API (NetworkPolicy)")
if ok := cache.WaitForCacheSync(stopCh, c.hasSynced); !ok {
klog.Error("Failed to wait for caches to sync (NetworkPolicy)")
return
}
klog.Infof("Finished syncing with Kubernetes API (NetworkPolicy)")
// Start the resource cache - this will trigger the queueing of any keys
// that are out of sync onto the resource cache event queue.
c.resourceCache.Run(reconcilerPeriod)
// Start a number of worker threads to read from the queue. Each worker
// will pull keys off the resource cache event queue and sync them to the
// k8s datastore.
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("NetworkPolicy controller is now running")
<-stopCh
klog.Info("Stopping NetworkPolicy controller")
}
func (c *k8sPolicyController) runWorker() {
for c.processNextItem() {
}
}
// processNextItem waits for an event on the output queue from the resource cache and syncs
// any received keys to the datastore.
func (c *k8sPolicyController) processNextItem() bool {
// Wait until there is a new item in the work queue.
workqueue := c.resourceCache.GetQueue()
key, quit := workqueue.Get()
if quit {
return false
}
// Sync the object to the k8s datastore.
if err := c.syncToDatastore(key.(string)); err != nil {
c.handleErr(err, key.(string))
}
// Indicate that we're done processing this key, allowing for safe parallel processing such that
// two objects with the same key are never processed in parallel.
workqueue.Done(key)
return true
}
// syncToDatastore syncs the given update to the k8s datastore. The provided key can be used to
// find the corresponding resource within the resource cache. If the resource for the provided key
// exists in the cache, then the value should be written to the datastore. If it does not exist
// in the cache, then it should be deleted from the datastore.
func (c *k8sPolicyController) syncToDatastore(key string) error {
// Check if it exists in the controller's cache.
obj, exists := c.resourceCache.Get(key)
if !exists {
// The object no longer exists - delete from the datastore.
klog.Infof("Deleting NetworkPolicy %s from k8s datastore", key)
ns, name := getkey(key)
err := c.client.NetworkingV1().NetworkPolicies(ns).Delete(context.Background(), name, metav1.DeleteOptions{})
if errors.IsNotFound(err) {
return nil
}
return err
} else {
// The object exists - update the datastore to reflect.
klog.Infof("Create/Update NetworkPolicy %s in k8s datastore", key)
p := obj.(netv1.NetworkPolicy)
// Lookup to see if this object already exists in the datastore.
gp, err := c.informer.Lister().NetworkPolicies(p.Namespace).Get(p.Name)
if err != nil {
if !errors.IsNotFound(err) {
klog.Warningf("Failed to get NetworkPolicy %s from datastore", key)
return err
}
// Doesn't exist - create it.
_, err := c.client.NetworkingV1().NetworkPolicies(p.Namespace).Create(context.Background(), &p, metav1.CreateOptions{})
if err != nil {
klog.Warningf("Failed to create NetworkPolicy %s", key)
return err
}
klog.Infof("Successfully created NetworkPolicy %s", key)
return nil
}
klog.V(4).Infof("New NetworkPolicy %s/%s %+v\n", p.Namespace, p.Name, p.Spec)
klog.V(4).Infof("Old NetworkPolicy %s/%s %+v\n", gp.Namespace, gp.Name, gp.Spec)
// The policy already exists, update it and write it back to the datastore.
gp.Spec = p.Spec
_, err = c.client.NetworkingV1().NetworkPolicies(p.Namespace).Update(context.Background(), gp, metav1.UpdateOptions{})
if err != nil {
klog.Warningf("Failed to update NetworkPolicy %s", key)
return err
}
klog.Infof("Successfully updated NetworkPolicy %s", key)
return nil
}
}
// handleErr handles errors which occur while processing a key received from the resource cache.
// For a given error, we will re-queue the key in order to retry the datastore sync up to 5 times,
// at which point the update is dropped.
func (c *k8sPolicyController) handleErr(err error, key string) {
workqueue := c.resourceCache.GetQueue()
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
workqueue.Forget(key)
return
}
// This controller retries 5 times if something goes wrong. After that, it stops trying.
if workqueue.NumRequeues(key) < 5 {
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
klog.Errorf("Error syncing NetworkPolicy %v: %v", key, err)
workqueue.AddRateLimited(key)
return
}
workqueue.Forget(key)
// Report to an external entity that, even after several retries, we could not successfully process this key
uruntime.HandleError(err)
klog.Errorf("Dropping NetworkPolicy %q out of the queue: %v", key, err)
}
// NewNsNetworkPolicyProvider sync k8s NetworkPolicy
func NewNsNetworkPolicyProvider(client kubernetes.Interface, npInformer informerv1.NetworkPolicyInformer) (NsNetworkPolicyProvider, error) {
var once sync.Once
c := &k8sPolicyController{
client: client,
informer: npInformer,
ctx: context.Background(),
hasSynced: npInformer.Informer().HasSynced,
}
// Function returns map of policyName:policy stored by policy controller
// in datastore.
listFunc := func() (map[string]interface{}, error) {
//Wait for the cache to be populated by the NSNP controller first;
//otherwise NetworkPolicies would be deleted by mistake.
once.Do(func() {
time.Sleep(defaultSyncTime)
})
// Get all policies from datastore
//TODO: filter out NetworkPolicies that do not belong to kubesphere
policies, err := npInformer.Lister().List(labels.Everything())
if err != nil {
return nil, err
}
// Filter in only objects that are written by policy controller.
m := make(map[string]interface{})
for _, policy := range policies {
if strings.HasPrefix(policy.Name, v1alpha1.NSNPPrefix) {
policy.ObjectMeta = metav1.ObjectMeta{Name: policy.Name, Namespace: policy.Namespace}
k := c.GetKey(policy.Name, policy.Namespace)
m[k] = *policy
}
}
klog.Infof("Found %d policies in k8s datastore:", len(m))
return m, nil
}
cacheArgs := rcache.ResourceCacheArgs{
ListFunc: listFunc,
ObjectType: reflect.TypeOf(netv1.NetworkPolicy{}),
}
c.resourceCache = rcache.NewResourceCache(cacheArgs)
return c, nil
}


@@ -1,85 +0,0 @@
// Copyright 2022 The KubeSphere Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// IsDeletionCandidate checks if object is candidate to be deleted
func IsDeletionCandidate(obj metav1.Object, finalizer string) bool {
return obj.GetDeletionTimestamp() != nil && ContainsString(obj.GetFinalizers(),
finalizer, nil)
}
// NeedToAddFinalizer checks if need to add finalizer to object
func NeedToAddFinalizer(obj metav1.Object, finalizer string) bool {
return obj.GetDeletionTimestamp() == nil && !ContainsString(obj.GetFinalizers(),
finalizer, nil)
}
// CopyStrings copies the contents of the specified string slice
// into a new slice.
func CopyStrings(s []string) []string {
if s == nil {
return nil
}
c := make([]string, len(s))
copy(c, s)
return c
}
// SortStrings sorts the specified string slice in place. It returns the same
// slice that was provided in order to facilitate method chaining.
func SortStrings(s []string) []string {
sort.Strings(s)
return s
}
// ContainsString checks if a given slice of strings contains the provided string.
// If a modifier func is provided, it is called with the slice item before the comparison.
func ContainsString(slice []string, s string, modifier func(s string) string) bool {
for _, item := range slice {
if item == s {
return true
}
if modifier != nil && modifier(item) == s {
return true
}
}
return false
}
// RemoveString returns a newly created []string that contains all items from slice
// except those equal to s (or whose modifier(item) equals s, if a modifier func is provided).
func RemoveString(slice []string, s string, modifier func(s string) string) []string {
newSlice := make([]string, 0)
for _, item := range slice {
if item == s {
continue
}
if modifier != nil && modifier(item) == s {
continue
}
newSlice = append(newSlice, item)
}
if len(newSlice) == 0 {
// Sanitize for unit tests so we don't need to distinguish empty array
// and nil.
newSlice = nil
}
return newSlice
}
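A small usage sketch for these helpers (the finalizer value is illustrative; strings is the standard library package):

    finalizers := []string{"helmapplication.application.kubesphere.io"}

    // Plain membership test and removal.
    if ContainsString(finalizers, "helmapplication.application.kubesphere.io", nil) {
        finalizers = RemoveString(finalizers, "helmapplication.application.kubesphere.io", nil)
    }
    // finalizers is now nil: RemoveString collapses an empty result to nil.

    // A modifier normalizes items before comparison, e.g. case-insensitive matching.
    found := ContainsString([]string{"Foo", "Bar"}, "foo", strings.ToLower) // true
    _ = found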


@@ -1,110 +0,0 @@
// Copyright 2022 The KubeSphere Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhooks
import (
"context"
"encoding/json"
"net/http"
"sync"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// Defaulter defines functions for setting defaults on resources
type Defaulter interface {
Default(obj runtime.Object) error
}
type DefaulterWrap struct {
Obj runtime.Object
Helper Defaulter
}
type MutatingHandler struct {
C client.Client
decoder *admission.Decoder
}
var _ admission.DecoderInjector = &MutatingHandler{}
// InjectDecoder injects the decoder into a MutatingHandler.
func (h *MutatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
}
type defaulters struct {
ds map[string]*DefaulterWrap
lock sync.RWMutex
}
var (
ds defaulters
)
func init() {
ds = defaulters{
ds: make(map[string]*DefaulterWrap),
lock: sync.RWMutex{},
}
}
func RegisterDefaulter(name string, d *DefaulterWrap) {
ds.lock.Lock()
defer ds.lock.Unlock()
ds.ds[name] = d
}
func UnRegisterDefaulter(name string) {
ds.lock.Lock()
defer ds.lock.Unlock()
delete(ds.ds, name)
}
func GetDefaulter(name string) *DefaulterWrap {
ds.lock.RLock()
defer ds.lock.RUnlock()
return ds.ds[name]
}
// Handle handles admission requests.
func (h *MutatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
defaulter := GetDefaulter(req.Kind.String())
if defaulter == nil {
return admission.Denied("crd has webhook configured, but the controller does not register the corresponding processing logic and refuses the operation by default.")
}
// Get the object in the request
obj := defaulter.Obj.DeepCopyObject()
err := h.decoder.Decode(req, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
// Default the object, returning an error response if defaulting fails.
if err := defaulter.Helper.Default(obj); err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
marshalled, err := json.Marshal(obj)
if err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
// Create the patch
return admission.PatchResponseFromRaw(req.Object.Raw, marshalled)
}
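A hypothetical registration sketch for this handler; the GVK, field names, and defaulting rule are invented for illustration, and it assumes the registration key matches the req.Kind.String() lookup above:

    type sampleDefaulter struct{}

    // Default fills in a missing spec.mode field on the decoded object.
    func (d *sampleDefaulter) Default(obj runtime.Object) error {
        u, ok := obj.(*unstructured.Unstructured)
        if !ok {
            return nil
        }
        if _, found, _ := unstructured.NestedString(u.Object, "spec", "mode"); !found {
            return unstructured.SetNestedField(u.Object, "standard", "spec", "mode")
        }
        return nil
    }

    func registerSampleDefaulter() {
        gvk := metav1.GroupVersionKind{Group: "example.kubesphere.io", Version: "v1alpha1", Kind: "Sample"}
        RegisterDefaulter(gvk.String(), &DefaulterWrap{
            Obj:    &unstructured.Unstructured{}, // template object the handler deep-copies and decodes into
            Helper: &sampleDefaulter{},
        })
    }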


@@ -1,146 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhooks
import (
"context"
"net/http"
"sync"
v1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// Validator defines functions for validating an operation
type Validator interface {
ValidateCreate(obj runtime.Object) error
ValidateUpdate(old runtime.Object, new runtime.Object) error
ValidateDelete(obj runtime.Object) error
}
type ValidatorWrap struct {
Obj runtime.Object
Helper Validator
}
type validators struct {
vs map[string]*ValidatorWrap
lock sync.RWMutex
}
var (
vs validators
)
func init() {
vs = validators{
vs: make(map[string]*ValidatorWrap),
lock: sync.RWMutex{},
}
}
func RegisterValidator(name string, v *ValidatorWrap) {
vs.lock.Lock()
defer vs.lock.Unlock()
vs.vs[name] = v
}
func UnRegisterValidator(name string) {
vs.lock.Lock()
defer vs.lock.Unlock()
delete(vs.vs, name)
}
func GetValidator(name string) *ValidatorWrap {
vs.lock.RLock()
defer vs.lock.RUnlock()
return vs.vs[name]
}
type ValidatingHandler struct {
C client.Client
decoder *admission.Decoder
}
var _ admission.DecoderInjector = &ValidatingHandler{}
// InjectDecoder injects the decoder into a ValidatingHandler.
func (h *ValidatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
}
// Handle handles admission requests.
func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
validator := GetValidator(req.Kind.String())
if validator == nil {
return admission.Denied("crd has webhook configured, but the controller does not register the corresponding processing logic and refuses the operation by default.")
}
// Get the object in the request
obj := validator.Obj.DeepCopyObject()
if req.Operation == v1.Create {
err := h.decoder.Decode(req, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateCreate(obj)
if err != nil {
return admission.Denied(err.Error())
}
}
if req.Operation == v1.Update {
oldObj := obj.DeepCopyObject()
err := h.decoder.DecodeRaw(req.Object, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = h.decoder.DecodeRaw(req.OldObject, oldObj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateUpdate(oldObj, obj)
if err != nil {
return admission.Denied(err.Error())
}
}
if req.Operation == v1.Delete {
// In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346
// OldObject contains the object being deleted
err := h.decoder.DecodeRaw(req.OldObject, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
err = validator.Helper.ValidateDelete(obj)
if err != nil {
return admission.Denied(err.Error())
}
}
return admission.Allowed("")
}
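A mirrored sketch for the validating side; the kind and the immutability rule are illustrative assumptions, not part of this package:

    type sampleValidator struct{}

    func (v *sampleValidator) ValidateCreate(obj runtime.Object) error { return nil }
    func (v *sampleValidator) ValidateDelete(obj runtime.Object) error { return nil }

    // ValidateUpdate rejects changes to a hypothetical immutable spec.mode field.
    func (v *sampleValidator) ValidateUpdate(old, new runtime.Object) error {
        oldU, ok1 := old.(*unstructured.Unstructured)
        newU, ok2 := new.(*unstructured.Unstructured)
        if !ok1 || !ok2 {
            return nil
        }
        oldMode, _, _ := unstructured.NestedString(oldU.Object, "spec", "mode")
        newMode, _, _ := unstructured.NestedString(newU.Object, "spec", "mode")
        if oldMode != newMode {
            return fmt.Errorf("spec.mode is immutable")
        }
        return nil
    }

    func registerSampleValidator() {
        gvk := metav1.GroupVersionKind{Group: "example.kubesphere.io", Version: "v1alpha1", Kind: "Sample"}
        RegisterValidator(gvk.String(), &ValidatorWrap{
            Obj:    &unstructured.Unstructured{},
            Helper: &sampleValidator{},
        })
    }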


@@ -1,611 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package notification
import (
"context"
"encoding/json"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
toolscache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/notification/v2beta2"
"kubesphere.io/api/types/v1beta1"
"kubesphere.io/api/types/v1beta2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"kubesphere.io/kubesphere/pkg/constants"
)
const (
// successSynced is used as part of the Event 'reason' when a resource is synced
successSynced = "Synced"
controllerName = "notification-controller"
messageResourceSynced = "Notification synced successfully"
)
type Controller struct {
client.Client
ksCache cache.Cache
reconciledObjs []client.Object
informerSynced []toolscache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
}
func NewController(k8sClient kubernetes.Interface, ksClient client.Client, ksCache cache.Cache) (*Controller, error) {
// Create event broadcaster
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
ctl := &Controller{
Client: ksClient,
ksCache: ksCache,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Notification"),
recorder: recorder,
}
klog.Info("Setting up event handlers")
if err := ctl.setEventHandlers(); err != nil {
return nil, err
}
return ctl, nil
}
func (c *Controller) setEventHandlers() error {
c.reconciledObjs = c.reconciledObjs[:0]
c.reconciledObjs = append(c.reconciledObjs,
&v2beta2.NotificationManager{},
&v2beta2.Config{},
&v2beta2.Receiver{},
&v2beta2.Router{},
&v2beta2.Silence{},
&corev1.Secret{},
&corev1.ConfigMap{},
)
c.informerSynced = c.informerSynced[:0]
for _, obj := range c.reconciledObjs {
if informer, err := c.ksCache.GetInformer(context.Background(), obj); err != nil {
klog.Errorf("get %s informer error, %v", obj.GetObjectKind().GroupVersionKind().String(), err)
return err
} else {
informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
AddFunc: c.enqueue,
UpdateFunc: func(old, new interface{}) {
c.enqueue(new)
},
DeleteFunc: c.enqueue,
})
c.informerSynced = append(c.informerSynced, informer.HasSynced)
}
}
// Watch the cluster add and delete operations.
if informer, err := c.ksCache.GetInformer(context.Background(), &v1alpha1.Cluster{}); err != nil {
klog.Errorf("get cluster informer error, %v", err)
return err
} else {
informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
AddFunc: c.enqueue,
DeleteFunc: c.enqueue,
})
c.informerSynced = append(c.informerSynced, informer.HasSynced)
}
return nil
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
klog.Info("Starting Notification controller")
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := toolscache.WaitForCacheSync(stopCh, c.informerSynced...); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
return nil
}
func (c *Controller) enqueue(obj interface{}) {
c.workqueue.Add(obj)
}
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
// Run the reconcile, passing it the object to be synced.
if err := c.reconcile(obj); err != nil {
// Put the item back on the workqueue to handle any transient errors,
// and surface the error so the rate-limit history is not reset below.
c.workqueue.AddRateLimited(obj)
return fmt.Errorf("error syncing %v: %s, requeuing", obj, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// reconcile compares the actual state with the desired state and attempts to
// converge the two. It then updates the Status block of the resource
// with its current status.
func (c *Controller) reconcile(obj interface{}) error {
runtimeObj, ok := obj.(client.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("object does not implement the Object interfaces"))
return nil
}
// Only reconcile secrets and configmaps that were created by the notification manager.
switch runtimeObj.(type) {
case *corev1.Secret, *corev1.ConfigMap:
if runtimeObj.GetNamespace() != constants.NotificationSecretNamespace ||
runtimeObj.GetLabels() == nil ||
runtimeObj.GetLabels()[constants.NotificationManagedLabel] != "true" {
klog.V(8).Infof("No need to reconcile %s/%s", runtimeObj.GetNamespace(), runtimeObj.GetName())
return nil
}
}
name := runtimeObj.GetName()
// The notification controller should update the annotations of secrets and configmaps managed by itself
// whenever a cluster is added or deleted. This way, the controller will have a chance to override the secret.
if _, ok := obj.(*v1alpha1.Cluster); ok {
err := c.updateSecretAndConfigmap()
if err != nil {
klog.Errorf("update secret and configmap failed, %s", err)
return err
}
return nil
}
err := c.Get(context.Background(), client.ObjectKey{Name: runtimeObj.GetName(), Namespace: runtimeObj.GetNamespace()}, runtimeObj)
if err != nil {
// The resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("obj '%s' in work queue no longer exists", name))
c.recorder.Event(runtimeObj, corev1.EventTypeNormal, successSynced, messageResourceSynced)
klog.Infof("Successfully synced %s", name)
return nil
}
klog.Error(err)
return err
}
if err = c.multiClusterSync(context.Background(), runtimeObj); err != nil {
return err
}
c.recorder.Event(runtimeObj, corev1.EventTypeNormal, successSynced, messageResourceSynced)
klog.Infof("Successfully synced %s", name)
return nil
}
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(ctx context.Context, obj client.Object) error {
if err := c.ensureNotControlledByKubefed(ctx, obj); err != nil {
klog.Error(err)
return err
}
clusterList := &v1alpha1.ClusterList{}
if err := c.ksCache.List(context.Background(), clusterList); err != nil {
return err
}
var clusters []string
for _, cluster := range clusterList.Items {
if cluster.DeletionTimestamp.IsZero() {
clusters = append(clusters, cluster.Name)
}
}
var fedObj client.Object
var fn controllerutil.MutateFn
switch obj := obj.(type) {
case *v2beta2.NotificationManager:
fedNotificationManager := &v1beta2.FederatedNotificationManager{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
},
}
fn = c.mutateFederatedNotificationManager(fedNotificationManager, obj)
fedObj = fedNotificationManager
case *v2beta2.Config:
fedConfig := &v1beta2.FederatedNotificationConfig{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
},
}
fn = c.mutateFederatedConfig(fedConfig, obj)
fedObj = fedConfig
case *v2beta2.Receiver:
fedReceiver := &v1beta2.FederatedNotificationReceiver{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
},
}
fn = c.mutateFederatedReceiver(fedReceiver, obj)
fedObj = fedReceiver
case *v2beta2.Router:
fedRouter := &v1beta2.FederatedNotificationRouter{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
},
}
fn = c.mutateFederatedRouter(fedRouter, obj)
fedObj = fedRouter
case *v2beta2.Silence:
fedSilence := &v1beta2.FederatedNotificationSilence{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
},
}
fn = c.mutateFederatedSilence(fedSilence, obj)
fedObj = fedSilence
case *corev1.Secret:
fedSecret := &v1beta1.FederatedSecret{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
Namespace: obj.Namespace,
},
}
fn = c.mutateFederatedSecret(fedSecret, obj, clusters)
fedObj = fedSecret
case *corev1.ConfigMap:
fedConfigmap := &v1beta1.FederatedConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: obj.Name,
Namespace: obj.Namespace,
},
}
fn = c.mutateFederatedConfigmap(fedConfigmap, obj, clusters)
fedObj = fedConfigmap
default:
klog.Errorf("unknown type for notification, %v", obj)
return nil
}
res, err := controllerutil.CreateOrUpdate(context.Background(), c.Client, fedObj, fn)
if err != nil {
klog.Errorf("CreateOrUpdate '%s' failed, %s", fedObj.GetName(), err)
} else {
klog.Infof("'%s' %s", fedObj.GetName(), res)
}
return err
}
func (c *Controller) mutateFederatedNotificationManager(fedObj *v1beta2.FederatedNotificationManager, obj *v2beta2.NotificationManager) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta2.FederatedNotificationManagerSpec{
Template: v1beta2.NotificationManagerTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: obj.Labels,
},
Spec: obj.Spec,
},
Placement: v1beta2.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedConfig(fedObj *v1beta2.FederatedNotificationConfig, obj *v2beta2.Config) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta2.FederatedNotificationConfigSpec{
Template: v1beta2.NotificationConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: obj.Labels,
},
Spec: obj.Spec,
},
Placement: v1beta2.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedReceiver(fedObj *v1beta2.FederatedNotificationReceiver, obj *v2beta2.Receiver) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta2.FederatedNotificationReceiverSpec{
Template: v1beta2.NotificationReceiverTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: obj.Labels,
},
Spec: obj.Spec,
},
Placement: v1beta2.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedRouter(fedObj *v1beta2.FederatedNotificationRouter, obj *v2beta2.Router) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta2.FederatedNotificationRouterSpec{
Template: v1beta2.NotificationRouterTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: obj.Labels,
},
Spec: obj.Spec,
},
Placement: v1beta2.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedSilence(fedObj *v1beta2.FederatedNotificationSilence, obj *v2beta2.Silence) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta2.FederatedNotificationSilenceSpec{
Template: v1beta2.NotificationSilenceTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: obj.Labels,
},
Spec: obj.Spec,
},
Placement: v1beta2.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedSecret(fedObj *v1beta1.FederatedSecret, obj *corev1.Secret, clusters []string) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta1.FederatedSecretSpec{
Template: v1beta1.SecretTemplate{
Data: obj.Data,
StringData: obj.StringData,
Type: obj.Type,
},
Placement: v1beta1.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
bs, err := json.Marshal(obj.Labels)
if err != nil {
return err
}
fedObj.Spec.Overrides = fedObj.Spec.Overrides[:0]
for _, cluster := range clusters {
fedObj.Spec.Overrides = append(fedObj.Spec.Overrides, v1beta1.GenericOverrideItem{
ClusterName: cluster,
ClusterOverrides: []v1beta1.ClusterOverride{
{
Path: "/metadata/labels",
Value: runtime.RawExtension{
Raw: bs,
},
},
},
})
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
func (c *Controller) mutateFederatedConfigmap(fedObj *v1beta1.FederatedConfigMap, obj *corev1.ConfigMap, clusters []string) controllerutil.MutateFn {
return func() error {
fedObj.Spec = v1beta1.FederatedConfigMapSpec{
Template: v1beta1.ConfigMapTemplate{
Data: obj.Data,
BinaryData: obj.BinaryData,
},
Placement: v1beta1.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
}
bs, err := json.Marshal(obj.Labels)
if err != nil {
return err
}
fedObj.Spec.Overrides = fedObj.Spec.Overrides[:0]
for _, cluster := range clusters {
fedObj.Spec.Overrides = append(fedObj.Spec.Overrides, v1beta1.GenericOverrideItem{
ClusterName: cluster,
ClusterOverrides: []v1beta1.ClusterOverride{
{
Path: "/metadata/labels",
Value: runtime.RawExtension{
Raw: bs,
},
},
},
})
}
return controllerutil.SetControllerReference(obj, fedObj, scheme.Scheme)
}
}
// updateSecretAndConfigmap updates the annotations of the secrets and configmaps managed by
// the notification controller to trigger a reconcile.
func (c *Controller) updateSecretAndConfigmap() error {
secretList := &corev1.SecretList{}
err := c.ksCache.List(context.Background(), secretList,
client.InNamespace(constants.NotificationSecretNamespace),
client.MatchingLabels{
constants.NotificationManagedLabel: "true",
})
if err != nil {
return err
}
for _, secret := range secretList.Items {
if secret.Annotations == nil {
secret.Annotations = make(map[string]string)
}
secret.Annotations["reloadtimestamp"] = time.Now().String()
if err := c.Update(context.Background(), &secret); err != nil {
return err
}
}
configmapList := &corev1.ConfigMapList{}
err = c.ksCache.List(context.Background(), configmapList,
client.InNamespace(constants.NotificationSecretNamespace),
client.MatchingLabels{
constants.NotificationManagedLabel: "true",
})
if err != nil {
return err
}
for _, configmap := range configmapList.Items {
if configmap.Annotations == nil {
configmap.Annotations = make(map[string]string)
}
configmap.Annotations["reloadtimestamp"] = time.Now().String()
if err := c.Update(context.Background(), &configmap); err != nil {
return err
}
}
return nil
}
func (c *Controller) ensureNotControlledByKubefed(ctx context.Context, obj client.Object) error {
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
}
if labels[constants.KubefedManagedLabel] != "false" {
labels[constants.KubefedManagedLabel] = "false"
obj.SetLabels(labels)
err := c.Update(ctx, obj)
if err != nil {
klog.Error(err)
}
}
return nil
}
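A wiring sketch, assuming mgr is an already-constructed controller-runtime manager and k8sClient a kubernetes.Interface; because Controller implements Start(ctx), it can be handed to the manager as a Runnable:

    ctl, err := NewController(k8sClient, mgr.GetClient(), mgr.GetCache())
    if err != nil {
        klog.Fatal(err)
    }
    if err := mgr.Add(ctl); err != nil { // the manager calls ctl.Start once its caches are ready
        klog.Fatal(err)
    }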


@@ -1,303 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package notification
import (
"context"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
fakek8s "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
"kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/notification/v2beta2"
"kubesphere.io/api/types/v1beta1"
"kubesphere.io/api/types/v1beta2"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"kubesphere.io/kubesphere/pkg/apis"
"kubesphere.io/kubesphere/pkg/constants"
)
func TestSource(t *testing.T) {
RegisterFailHandler(Fail)
suiteName := "Notification Controller Suite"
RunSpecs(t, suiteName)
}
var (
_ = Describe("Secret", func() {
_ = v2beta2.AddToScheme(scheme.Scheme)
_ = apis.AddToScheme(scheme.Scheme)
_ = v1beta2.AddToScheme(scheme.Scheme)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "secret-foo",
Namespace: constants.NotificationSecretNamespace,
Labels: map[string]string{
constants.NotificationManagedLabel: "true",
},
},
}
config := &v2beta2.Config{
ObjectMeta: metav1.ObjectMeta{
Name: "config-foo",
Labels: map[string]string{
"type": "global",
},
},
}
receiver := &v2beta2.Receiver{
ObjectMeta: metav1.ObjectMeta{
Name: "receiver-foo",
Labels: map[string]string{
"type": "default",
},
},
}
router := &v2beta2.Router{
ObjectMeta: metav1.ObjectMeta{
Name: "router-foo",
},
}
silence := &v2beta2.Silence{
ObjectMeta: metav1.ObjectMeta{
Name: "silence-foo",
Labels: map[string]string{
"type": "global",
},
},
}
host := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "host",
},
}
var (
cl client.Client
ksCache cache.Cache
k8sClient kubernetes.Interface
informerCacheCtx context.Context
)
BeforeEach(func() {
k8sClient = fakek8s.NewSimpleClientset()
//nolint:staticcheck
cl = fake.NewFakeClientWithScheme(scheme.Scheme)
informerCacheCtx = context.TODO()
ksCache = &fakeCache{
k8sClient,
cl,
}
})
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.
Context("Notification Controller", func() {
It("Should create successfully", func() {
r, err := NewController(k8sClient, cl, ksCache)
Expect(err).ToNot(HaveOccurred())
// Create a secret
Expect(cl.Create(context.Background(), secret)).Should(Succeed())
Expect(r.reconcile(secret)).Should(Succeed())
fedSecret := &v1beta1.FederatedSecret{}
By("Expecting to create federated secret successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: secret.Name, Namespace: constants.NotificationSecretNamespace}, fedSecret)
Expect(err).Should(Succeed())
Expect(fedSecret.Name).Should(Equal(secret.Name))
// Update a secret
err = ksCache.Get(context.Background(), client.ObjectKey{Name: secret.Name, Namespace: constants.NotificationSecretNamespace}, secret)
Expect(err).Should(Succeed())
secret.StringData = map[string]string{"foo": "bar"}
Expect(cl.Update(context.Background(), secret)).Should(Succeed())
Expect(r.reconcile(secret)).Should(Succeed())
By("Expecting to update federated secret successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: secret.Name, Namespace: constants.NotificationSecretNamespace}, fedSecret)
Expect(err).Should(Succeed())
Expect(fedSecret.Spec.Template.StringData["foo"]).Should(Equal("bar"))
// Create a Config
Expect(cl.Create(context.Background(), config)).Should(Succeed())
Expect(r.reconcile(config)).Should(Succeed())
fedConfig := &v1beta2.FederatedNotificationConfig{}
By("Expecting to create federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: config.Name}, fedConfig)
Expect(err).Should(Succeed())
Expect(fedConfig.Name).Should(Equal(config.Name))
// Update a config
err = ksCache.Get(context.Background(), client.ObjectKey{Name: config.Name}, config)
Expect(err).Should(Succeed())
config.Labels = map[string]string{"foo": "bar"}
Expect(cl.Update(context.Background(), config)).Should(Succeed())
Expect(r.reconcile(config)).Should(Succeed())
By("Expecting to update federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: config.Name}, fedConfig)
Expect(err).Should(Succeed())
Expect(fedConfig.Spec.Template.Labels["foo"]).Should(Equal("bar"))
// Create a receiver
Expect(cl.Create(context.Background(), receiver)).Should(Succeed())
Expect(r.reconcile(receiver)).Should(Succeed())
fedReceiver := &v1beta2.FederatedNotificationReceiver{}
By("Expecting to create federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: receiver.Name}, fedReceiver)
Expect(err).Should(Succeed())
Expect(fedReceiver.Name).Should(Equal(receiver.Name))
// Update a receiver
err = ksCache.Get(context.Background(), client.ObjectKey{Name: receiver.Name}, receiver)
Expect(err).Should(Succeed())
receiver.Labels = map[string]string{"foo": "bar"}
Expect(cl.Update(context.Background(), receiver)).Should(Succeed())
Expect(r.reconcile(receiver)).Should(Succeed())
By("Expecting to update federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: receiver.Name}, fedReceiver)
Expect(err).Should(Succeed())
Expect(fedReceiver.Spec.Template.Labels["foo"]).Should(Equal("bar"))
// Create a router
Expect(cl.Create(context.Background(), router)).Should(Succeed())
Expect(r.reconcile(router)).Should(Succeed())
fedRouter := &v1beta2.FederatedNotificationRouter{}
By("Expecting to create federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: router.Name}, fedRouter)
Expect(err).Should(Succeed())
Expect(fedRouter.Name).Should(Equal(router.Name))
// Update a router
err = ksCache.Get(context.Background(), client.ObjectKey{Name: router.Name}, router)
Expect(err).Should(Succeed())
router.Labels = map[string]string{"foo": "bar"}
Expect(cl.Update(context.Background(), router)).Should(Succeed())
Expect(r.reconcile(router)).Should(Succeed())
By("Expecting to update federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: router.Name}, fedRouter)
Expect(err).Should(Succeed())
Expect(fedRouter.Spec.Template.Labels["foo"]).Should(Equal("bar"))
// Create a silence
Expect(cl.Create(context.Background(), silence)).Should(Succeed())
Expect(r.reconcile(silence)).Should(Succeed())
fedSilence := &v1beta2.FederatedNotificationSilence{}
By("Expecting to create federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: silence.Name}, fedSilence)
Expect(err).Should(Succeed())
Expect(fedSilence.Name).Should(Equal(silence.Name))
// Update a silence
err = ksCache.Get(context.Background(), client.ObjectKey{Name: silence.Name}, silence)
Expect(err).Should(Succeed())
silence.Labels = map[string]string{"foo": "bar"}
Expect(cl.Update(context.Background(), silence)).Should(Succeed())
Expect(r.reconcile(silence)).Should(Succeed())
By("Expecting to update federated object successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: silence.Name}, fedSilence)
Expect(err).Should(Succeed())
Expect(fedSilence.Spec.Template.Labels["foo"]).Should(Equal("bar"))
// Add a cluster
Expect(cl.Create(informerCacheCtx, host)).Should(Succeed())
Expect(r.reconcile(secret)).Should(Succeed())
By("Expecting to update federated secret successfully")
err = ksCache.Get(context.Background(), client.ObjectKey{Name: secret.Name, Namespace: constants.NotificationSecretNamespace}, fedSecret)
Expect(err).Should(Succeed())
Expect(fedSecret.Spec.Overrides[0].ClusterName).Should(Equal("host"))
// Delete a cluster
Expect(cl.Delete(informerCacheCtx, host)).Should(Succeed())
Expect(r.reconcile(secret)).Should(Succeed())
By("Expecting to update federated secret successfully")
fedSecret = &v1beta1.FederatedSecret{}
err = ksCache.Get(context.Background(), client.ObjectKey{Name: secret.Name, Namespace: constants.NotificationSecretNamespace}, fedSecret)
Expect(err).Should(Succeed())
Expect(fedSecret.Spec.Overrides).Should(BeNil())
})
})
})
)
const defaultResync = 600 * time.Second
type fakeCache struct {
K8sClient kubernetes.Interface
client.Reader
}
// GetInformerForKind returns the informer for the GroupVersionKind
func (f *fakeCache) GetInformerForKind(_ context.Context, _ schema.GroupVersionKind) (cache.Informer, error) {
return nil, nil
}
// GetInformer returns the informer for the obj
func (f *fakeCache) GetInformer(_ context.Context, _ client.Object) (cache.Informer, error) {
fakeInformerFactory := k8sinformers.NewSharedInformerFactory(f.K8sClient, defaultResync)
return fakeInformerFactory.Core().V1().Namespaces().Informer(), nil
}
func (f *fakeCache) IndexField(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error {
return nil
}
func (f *fakeCache) Start(_ context.Context) error {
return nil
}
func (f *fakeCache) WaitForCacheSync(_ context.Context) bool {
return true
}


@@ -0,0 +1,93 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package openapi
import (
"github.com/go-openapi/spec"
"kubesphere.io/kubesphere/pkg/api"
ksVersion "kubesphere.io/kubesphere/pkg/version"
)
func EnrichSwaggerObject(swo *spec.Swagger) {
swo.Info = &spec.Info{
InfoProps: spec.InfoProps{
Title: "KubeSphere API",
Description: "KubeSphere Enterprise OpenAPI",
Version: ksVersion.Get().GitVersion,
Contact: &spec.ContactInfo{
ContactInfoProps: spec.ContactInfoProps{
Name: "KubeSphere",
URL: "https://kubesphere.com.cn",
Email: "support@kubesphere.cloud",
},
},
},
}
// setup security definitions
swo.SecurityDefinitions = map[string]*spec.SecurityScheme{
"BearerToken": {SecuritySchemeProps: spec.SecuritySchemeProps{
Type: "apiKey",
Name: "Authorization",
In: "header",
Description: "Bearer Token Authentication",
}},
}
swo.Security = []map[string][]string{{"BearerToken": []string{}}}
swo.Tags = []spec.Tag{
{
TagProps: spec.TagProps{
Name: api.TagAuthentication,
},
},
{
TagProps: spec.TagProps{
Name: api.TagMultiCluster,
},
},
{
TagProps: spec.TagProps{
Name: api.TagIdentityManagement,
},
},
{
TagProps: spec.TagProps{
Name: api.TagAccessManagement,
},
},
{
TagProps: spec.TagProps{
Name: api.TagClusterResources,
},
},
{
TagProps: spec.TagProps{
Name: api.TagNamespacedResources,
},
},
{
TagProps: spec.TagProps{
Name: api.TagComponentStatus,
},
},
{
TagProps: spec.TagProps{
Name: api.TagUserRelatedResources,
},
},
{
TagProps: spec.TagProps{
Name: api.TagTerminal,
},
},
{
TagProps: spec.TagProps{
Name: api.TagNonResourceAPI,
},
},
}
}
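A sketch of how this hook is typically installed with go-restful's OpenAPI service (restfulspec is github.com/emicklei/go-restful-openapi; the container variable and API path are illustrative):

    config := restfulspec.Config{
        WebServices:                   container.RegisteredWebServices(),
        APIPath:                       "/apidocs.json",
        PostBuildSwaggerObjectHandler: EnrichSwaggerObject, // runs after the spec is generated
    }
    container.Add(restfulspec.NewOpenAPIService(config))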


@@ -0,0 +1,69 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package openapi
import (
"context"
"fmt"
toolscache "k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
runtimecache "sigs.k8s.io/controller-runtime/pkg/cache"
extensionsv1alpha1 "kubesphere.io/api/extensions/v1alpha1"
"kubesphere.io/kubesphere/kube/pkg/openapi"
)
var SharedOpenAPIController = NewController()
type Controller struct{}
func NewController() *Controller {
return &Controller{}
}
func (c *Controller) WatchOpenAPIChanges(ctx context.Context, cache runtimecache.Cache, openAPIV2Service openapi.APIServiceManager, openAPIV3Service openapi.APIServiceManager) error {
informer, err := cache.GetInformer(ctx, &extensionsv1alpha1.APIService{})
if err != nil {
return fmt.Errorf("get informer failed: %w", err)
}
_, err = informer.AddEventHandler(toolscache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
apiService := obj.(*extensionsv1alpha1.APIService)
return apiService.Status.State == extensionsv1alpha1.StateAvailable
},
Handler: &toolscache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
apiService := obj.(*extensionsv1alpha1.APIService)
if err := openAPIV2Service.AddUpdateApiService(apiService); err != nil {
klog.Error(err)
}
if err := openAPIV3Service.AddUpdateApiService(apiService); err != nil {
klog.Error(err)
}
},
UpdateFunc: func(old, new interface{}) {
apiService := new.(*extensionsv1alpha1.APIService)
if err := openAPIV2Service.AddUpdateApiService(apiService); err != nil {
klog.Error(err)
}
if err := openAPIV3Service.AddUpdateApiService(apiService); err != nil {
klog.Error(err)
}
},
DeleteFunc: func(obj interface{}) {
apiService := obj.(*extensionsv1alpha1.APIService)
openAPIV2Service.RemoveApiService(apiService.Name)
openAPIV3Service.RemoveApiService(apiService.Name)
},
},
})
if err != nil {
return fmt.Errorf("add event handler failed: %w", err)
}
return nil
}
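A usage sketch; ctx, mgr, and the two APIServiceManager values are assumed to exist already:

    if err := SharedOpenAPIController.WatchOpenAPIChanges(ctx, mgr.GetCache(), openAPIV2Service, openAPIV3Service); err != nil {
        klog.Fatalf("failed to watch OpenAPI changes: %v", err)
    }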


@@ -1,13 +0,0 @@
approvers:
- zheng1
- wansir
- zryfish
reviewers:
- zheng1
- wansir
- zryfish
- xyz-li
labels:
- area/app-management


@@ -1,271 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmapplication
import (
"context"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/labels"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
func init() {
registerMetrics()
}
var _ reconcile.Reconciler = &ReconcileHelmApplication{}
// ReconcileHelmApplication reconciles a federated helm application object
type ReconcileHelmApplication struct {
client.Client
}
const (
appFinalizer = "helmapplication.application.kubesphere.io"
)
func (r *ReconcileHelmApplication) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
klog.V(4).Infof("sync helm application: %s ", request.String())
rootCtx := context.Background()
app := &v1alpha1.HelmApplication{}
err := r.Client.Get(rootCtx, request.NamespacedName, app)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
if app.DeletionTimestamp == nil {
// new app, update finalizer
if !sliceutil.HasString(app.ObjectMeta.Finalizers, appFinalizer) {
app.ObjectMeta.Finalizers = append(app.ObjectMeta.Finalizers, appFinalizer)
if err := r.Update(rootCtx, app); err != nil {
return reconcile.Result{}, err
}
// create app success
appOperationTotal.WithLabelValues("creation", app.GetTrueName(), strconv.FormatBool(inAppStore(app))).Inc()
}
if !inAppStore(app) {
// If the workspace of this app is being deleted, clean up this app.
if err := r.cleanupDanglingApp(context.TODO(), app); err != nil {
return reconcile.Result{}, err
}
if app.Status.State == v1alpha1.StateActive ||
app.Status.State == v1alpha1.StateSuspended {
if err := r.createAppCopyInAppStore(rootCtx, app); err != nil {
klog.Errorf("create app copy failed, error: %s", err)
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
}
// app has changed, update app status
return reconcile.Result{}, updateHelmApplicationStatus(r.Client, strings.TrimSuffix(app.Name, v1alpha1.HelmApplicationAppStoreSuffix), inAppStore(app))
} else {
// delete app copy in appStore
if !inAppStore(app) {
if err := r.deleteAppCopyInAppStore(rootCtx, app.Name); err != nil {
return reconcile.Result{}, err
}
}
app.ObjectMeta.Finalizers = sliceutil.RemoveString(app.ObjectMeta.Finalizers, func(item string) bool {
return item == appFinalizer
})
klog.V(4).Info("update app")
if err := r.Update(rootCtx, app); err != nil {
klog.Errorf("update app failed, error: %s", err)
return ctrl.Result{}, err
} else {
// delete app success
appOperationTotal.WithLabelValues("deletion", app.GetTrueName(), strconv.FormatBool(inAppStore(app))).Inc()
}
}
return reconcile.Result{}, nil
}
func (r *ReconcileHelmApplication) deleteAppCopyInAppStore(ctx context.Context, name string) error {
appInStore := &v1alpha1.HelmApplication{}
err := r.Client.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s%s", name, v1alpha1.HelmApplicationAppStoreSuffix)}, appInStore)
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
} else {
err = r.Delete(ctx, appInStore)
return err
}
return nil
}
// createAppCopyInAppStore creates a copy of the application in the app store
func (r *ReconcileHelmApplication) createAppCopyInAppStore(ctx context.Context, originApp *v1alpha1.HelmApplication) error {
name := fmt.Sprintf("%s%s", originApp.Name, v1alpha1.HelmApplicationAppStoreSuffix)
app := &v1alpha1.HelmApplication{}
err := r.Get(ctx, types.NamespacedName{Name: name}, app)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
if app.Name == "" {
app.Name = name
labels := originApp.Labels
if len(labels) == 0 {
labels = make(map[string]string, 3)
}
labels[constants.ChartRepoIdLabelKey] = v1alpha1.AppStoreRepoId
// assign a default category to app
if labels[constants.CategoryIdLabelKey] == "" {
labels[constants.CategoryIdLabelKey] = v1alpha1.UncategorizedId
}
// record the original workspace
labels[v1alpha1.OriginWorkspaceLabelKey] = originApp.GetWorkspace()
// apps in the store are global resources.
delete(labels, constants.WorkspaceLabelKey)
app.Annotations = originApp.Annotations
app.Labels = labels
app.Spec = *originApp.Spec.DeepCopy()
err = r.Create(context.TODO(), app)
if err != nil {
return err
}
}
return nil
}
func (r *ReconcileHelmApplication) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmApplication{}).Complete(r)
}
func inAppStore(app *v1alpha1.HelmApplication) bool {
return strings.HasSuffix(app.Name, v1alpha1.HelmApplicationAppStoreSuffix)
}
// cleanupDanglingApp deletes the app when it is neither active nor suspended;
// otherwise it clears the workspace label and removes the non-active app versions.
//
// When a workspace is being deleted, we can delete all of its apps that are not active or suspended,
// but an app that has been promoted to the app store needs special handling.
// If we simply delete that app, it will be removed from the app store too.
// If we leave it alone and a user later creates a workspace with the same name,
// the app will appear in the new workspace, which confuses the user.
// So we first delete all app versions that are neither active nor suspended,
// then remove the workspace label from the app. On the KubeSphere console, such an app
// is shown with a hint like "(workspace deleted)".
func (r *ReconcileHelmApplication) cleanupDanglingApp(ctx context.Context, app *v1alpha1.HelmApplication) error {
if app.Annotations != nil && app.Annotations[constants.DanglingAppCleanupKey] == constants.CleanupDanglingAppOngoing {
// Just delete the app when its state is neither active nor suspended.
if app.Status.State != v1alpha1.StateActive && app.Status.State != v1alpha1.StateSuspended {
err := r.Delete(ctx, app)
if err != nil {
klog.Errorf("delete app: %s, state: %s, error: %s",
app.GetHelmApplicationId(), app.Status.State, err)
return err
}
return nil
}
var appVersions v1alpha1.HelmApplicationVersionList
err := r.List(ctx, &appVersions, &client.ListOptions{LabelSelector: labels.SelectorFromSet(map[string]string{
constants.ChartApplicationIdLabelKey: app.GetHelmApplicationId()})})
if err != nil {
klog.Errorf("list app version of %s failed, error: %s", app.GetHelmApplicationId(), err)
return err
}
// Delete app versions that are neither active nor suspended.
for _, version := range appVersions.Items {
if version.Status.State != v1alpha1.StateActive && version.Status.State != v1alpha1.StateSuspended {
err = r.Delete(ctx, &version)
if err != nil {
klog.Errorf("delete app version: %s, state: %s, error: %s",
version.GetHelmApplicationVersionId(), version.Status.State, err)
return err
}
}
}
// Mark the app that the workspace to which it belongs has been deleted.
var appInStore v1alpha1.HelmApplication
err = r.Get(ctx,
types.NamespacedName{Name: fmt.Sprintf("%s%s", app.GetHelmApplicationId(), v1alpha1.HelmApplicationAppStoreSuffix)}, &appInStore)
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
} else {
appCopy := appInStore.DeepCopy()
if appCopy.Annotations == nil {
appCopy.Annotations = map[string]string{}
}
appCopy.Annotations[constants.DanglingAppCleanupKey] = constants.CleanupDanglingAppDone
patchedApp := client.MergeFrom(&appInStore)
err = r.Patch(ctx, appCopy, patchedApp)
if err != nil {
klog.Errorf("patch app: %s failed, error: %s", app.GetHelmApplicationId(), err)
return err
}
}
appCopy := app.DeepCopy()
appCopy.Annotations[constants.DanglingAppCleanupKey] = constants.CleanupDanglingAppDone
// Remove the workspace label; otherwise, if a user creates a workspace with the same name, this app will show up in the new workspace.
if appCopy.Labels == nil {
appCopy.Labels = map[string]string{}
}
appCopy.Labels[constants.WorkspaceLabelKey] = ""
patchedApp := client.MergeFrom(app)
err = r.Patch(ctx, appCopy, patchedApp)
if err != nil {
klog.Errorf("patch app: %s failed, error: %s", app.GetHelmApplicationId(), err)
return err
}
}
return nil
}
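A minimal main-style sketch for registering this reconciler; the manager options are illustrative, and the scheme must already include the application CRDs (via apis.AddToScheme, as in the test suite below):

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme.Scheme})
    if err != nil {
        klog.Fatal(err)
    }
    if err := (&ReconcileHelmApplication{}).SetupWithManager(mgr); err != nil {
        klog.Fatal(err)
    }
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        klog.Fatal(err)
    }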


@@ -1,102 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmapplication
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/onsi/gomega/gexec"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
func TestHelmApplicationController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Helm Application Test Suite")
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
t := true
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "ks-core", "crds")},
AttachControlPlaneOutput: false,
}
}
cfg, err := testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
MetricsBindAddress: "0",
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&ReconcileHelmApplication{}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
err = (&ReconcileHelmApplicationVersion{}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
gexec.KillAndWait(5 * time.Second)
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})


@@ -1,139 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmapplication
import (
"context"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/idutils"
)
var _ = Describe("helmApplication", func() {
const timeout = time.Second * 240
const interval = time.Second * 1
app := createApp()
appVer := createAppVersion(app.GetHelmApplicationId(), "0.0.1")
appVer2 := createAppVersion(app.GetHelmApplicationId(), "0.0.2")
BeforeEach(func() {
err := k8sClient.Create(context.Background(), app)
Expect(err).NotTo(HaveOccurred())
err = k8sClient.Create(context.Background(), appVer)
Expect(err).NotTo(HaveOccurred())
err = k8sClient.Create(context.Background(), appVer2)
Expect(err).NotTo(HaveOccurred())
})
Context("Helm Application Controller", func() {
It("Should success", func() {
By("Update helm app version status")
Eventually(func() bool {
k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer.Name}, appVer)
appVer.Status = v1alpha1.HelmApplicationVersionStatus{
State: v1alpha1.StateActive,
}
err := k8sClient.Status().Update(context.Background(), appVer)
return err == nil
}, timeout, interval).Should(BeTrue())
By("Wait for app status become active")
Eventually(func() bool {
var localApp v1alpha1.HelmApplication
appKey := types.NamespacedName{
Name: app.Name,
}
k8sClient.Get(context.Background(), appKey, &localApp)
return localApp.State() == v1alpha1.StateActive
}, timeout, interval).Should(BeTrue())
By("Mark workspace is deleted")
Eventually(func() bool {
var localApp v1alpha1.HelmApplication
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: app.Name}, &localApp)
if err != nil {
return false
}
appCopy := localApp.DeepCopy()
appCopy.Annotations = map[string]string{}
appCopy.Annotations[constants.DanglingAppCleanupKey] = constants.CleanupDanglingAppOngoing
patchData := client.MergeFrom(&localApp)
err = k8sClient.Patch(context.Background(), appCopy, patchData)
return err == nil
}, timeout, interval).Should(BeTrue())
By("Draft app version are deleted")
Eventually(func() bool {
var ver v1alpha1.HelmApplicationVersion
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer2.Name}, &ver)
return apierrors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
By("Active app version exists")
Eventually(func() bool {
var ver v1alpha1.HelmApplicationVersion
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer.Name}, &ver)
return err == nil
}, timeout, interval).Should(BeTrue())
})
})
})
func createApp() *v1alpha1.HelmApplication {
return &v1alpha1.HelmApplication{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix),
},
Spec: v1alpha1.HelmApplicationSpec{
Name: "dummy-chart",
},
}
}
func createAppVersion(appId string, version string) *v1alpha1.HelmApplicationVersion {
return &v1alpha1.HelmApplicationVersion{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
Labels: map[string]string{
constants.ChartApplicationIdLabelKey: appId,
},
},
Spec: v1alpha1.HelmApplicationVersionSpec{
Metadata: &v1alpha1.Metadata{
Version: version,
Name: "dummy-chart",
},
},
}
}


@@ -1,285 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmapplication
import (
"context"
"fmt"
"time"
"github.com/Masterminds/semver/v3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
const (
HelmAppVersionFinalizer = "helmappversion.application.kubesphere.io"
)
var _ reconcile.Reconciler = &ReconcileHelmApplicationVersion{}
// ReconcileHelmApplicationVersion reconciles a helm application version object
type ReconcileHelmApplicationVersion struct {
client.Client
}
// Reconcile reads the state of the cluster for a HelmApplicationVersion object and makes changes
// based on the state read and on what is in the HelmApplicationVersion.Spec.
func (r *ReconcileHelmApplicationVersion) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
klog.V(4).Infof("sync helm application version: %s", request.String())
defer func() {
klog.V(4).Infof("sync helm application version end: %s, elapsed: %v", request.String(), time.Since(start))
}()
appVersion := &v1alpha1.HelmApplicationVersion{}
err := r.Client.Get(context.TODO(), request.NamespacedName, appVersion)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
if appVersion.Status.State == "" {
// set status to draft
return reconcile.Result{}, r.updateStatus(appVersion)
}
if !sliceutil.HasString(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer) {
appVersion.ObjectMeta.Finalizers = append(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer)
if err := r.Update(context.Background(), appVersion); err != nil {
return reconcile.Result{}, err
} else {
return reconcile.Result{}, nil
}
}
} else {
// The object is being deleted
if sliceutil.HasString(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer) {
// update related helm application
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), false)
if err != nil {
return reconcile.Result{}, err
}
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
if err != nil {
return reconcile.Result{}, err
}
// Remove the finalizer so the HelmApplicationVersion can be deleted.
appVersion.ObjectMeta.Finalizers = sliceutil.RemoveString(appVersion.ObjectMeta.Finalizers, func(item string) bool {
return item == HelmAppVersionFinalizer
})
if err := r.Update(context.Background(), appVersion); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
// update related helm application
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), false)
if err != nil {
return reconcile.Result{}, err
}
if appVersion.Status.State == v1alpha1.StateActive {
// add labels to helm application version
// The label will exist forever, since this HelmApplicationVersion's state can only be active or suspended.
if appVersion.GetHelmRepoId() == "" {
instanceCopy := appVersion.DeepCopy()
// guard against writing to a nil label map
if instanceCopy.Labels == nil {
instanceCopy.Labels = make(map[string]string, 1)
}
instanceCopy.Labels[constants.ChartRepoIdLabelKey] = v1alpha1.AppStoreRepoId
patch := client.MergeFrom(appVersion)
err = r.Client.Patch(context.TODO(), instanceCopy, patch)
if err != nil {
return reconcile.Result{}, err
}
}
app := v1alpha1.HelmApplication{}
err = r.Get(context.TODO(), types.NamespacedName{Name: appVersion.GetHelmApplicationId()}, &app)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
} else if appVersion.Status.State == v1alpha1.StateSuspended {
return reconcile.Result{}, updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
}
return reconcile.Result{}, nil
}
func updateHelmApplicationStatus(c client.Client, appId string, inAppStore bool) error {
app := v1alpha1.HelmApplication{}
var err error
if inAppStore {
// application name ends with `-store`
err = c.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s%s", appId, v1alpha1.HelmApplicationAppStoreSuffix)}, &app)
} else {
err = c.Get(context.TODO(), types.NamespacedName{Name: appId}, &app)
}
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
return err
}
if !app.DeletionTimestamp.IsZero() {
return nil
}
var versions v1alpha1.HelmApplicationVersionList
err = c.List(context.TODO(), &versions, client.MatchingLabels{
constants.ChartApplicationIdLabelKey: appId,
})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
latestVersionName := getLatestVersionName(versions, inAppStore)
state := mergeApplicationVersionState(versions)
now := time.Now()
if state != app.Status.State {
// update StatusTime when state changed
app.Status.StatusTime = &metav1.Time{Time: now}
}
if state != app.Status.State || latestVersionName != app.Status.LatestVersion {
app.Status.State = state
app.Status.LatestVersion = latestVersionName
app.Status.UpdateTime = &metav1.Time{Time: now}
err := c.Status().Update(context.TODO(), &app)
if err != nil {
return err
}
}
return nil
}
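// Naming convention, illustrated (the "app-abc" id below is hypothetical):
// for application id "app-abc", the in-store copy is fetched under
// "app-abc" + v1alpha1.HelmApplicationAppStoreSuffix (the `-store` suffix),
// while the workspace copy uses the bare id.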
func (r *ReconcileHelmApplicationVersion) updateStatus(appVersion *v1alpha1.HelmApplicationVersion) error {
appVersion.Status = v1alpha1.HelmApplicationVersionStatus{
State: v1alpha1.StateDraft,
Audit: []v1alpha1.Audit{
{
State: v1alpha1.StateDraft,
Time: appVersion.CreationTimestamp,
Operator: appVersion.GetCreator(),
},
},
}
err := r.Status().Update(context.TODO(), appVersion)
if err != nil {
return err
}
return nil
}
// getLatestVersionName gets the latest version name from versions.
// If inAppStore is false, return the latest version name among all versions;
// if inAppStore is true, return the latest version name among the ACTIVE versions only.
func getLatestVersionName(versions v1alpha1.HelmApplicationVersionList, inAppStore bool) string {
if len(versions.Items) == 0 {
return ""
}
var latestVersionName string
var latestSemver *semver.Version
for _, version := range versions.Items {
// If the appVersion is being deleted, ignore it.
// If inAppStore is true, we just need ACTIVE appVersion.
if version.DeletionTimestamp != nil || (inAppStore && version.Status.State != v1alpha1.StateActive) {
continue
}
currSemver, err := semver.NewVersion(version.GetSemver())
if err == nil {
if latestSemver == nil {
// the first valid semver
latestSemver = currSemver
latestVersionName = version.GetVersionName()
} else if latestSemver.LessThan(currSemver) {
// found a newer valid semver
latestSemver = currSemver
latestVersionName = version.GetVersionName()
}
} else {
// If the semver is invalid, just ignore it.
klog.V(2).Infof("parse version failed, id: %s, err: %s", version.Name, err)
}
}
return latestVersionName
}
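// Illustrative sketch of the semver comparison driving the selection above,
// using the same github.com/Masterminds/semver/v3 API (versions are examples):
//
//    older, _ := semver.NewVersion("0.2.0")
//    newer, _ := semver.NewVersion("0.10.0")
//    older.LessThan(newer) // true: components compare numerically, so 0.10.0 > 0.2.0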
func mergeApplicationVersionState(versions v1alpha1.HelmApplicationVersionList) string {
states := make(map[string]int, len(versions.Items))
for _, version := range versions.Items {
if version.DeletionTimestamp == nil {
state := version.Status.State
states[state] = states[state] + 1
}
}
// If there is at least one active appVersion, the helm application is active
if states[v1alpha1.StateActive] > 0 {
return v1alpha1.StateActive
}
// If all appVersions are draft, the helm application is draft
if states[v1alpha1.StateDraft] == len(versions.Items) {
return v1alpha1.StateDraft
}
// No active appVersion and not all drafts: any suspended appVersion makes the app suspended
if states[v1alpha1.StateSuspended] > 0 {
return v1alpha1.StateSuspended
}
// default state is draft
return v1alpha1.StateDraft
}
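// Worked examples of the merge rules above (version state sets are illustrative):
//
//    {active, draft}    -> active    (any active version wins)
//    {draft, draft}     -> draft     (every version is still draft)
//    {suspended, draft} -> suspended (no active version, not all draft)
//    {}                 -> draft     (default when there are no versions)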
func (r *ReconcileHelmApplicationVersion) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HelmApplicationVersion{}).
Complete(r)
}
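// Wiring sketch (assumes an existing controller-runtime manager `mgr`; this is
// an illustration, not code from this package):
//
//    if err := (&ReconcileHelmApplicationVersion{}).SetupWithManager(mgr); err != nil {
//        klog.Fatalf("setup helm application version controller failed: %v", err)
//    }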


@@ -1,45 +0,0 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmapplication
import (
compbasemetrics "k8s.io/component-base/metrics"
"kubesphere.io/kubesphere/pkg/utils/metrics"
)
var (
appOperationTotal = compbasemetrics.NewCounterVec(
&compbasemetrics.CounterOpts{
Subsystem: "ks_cm",
Name: "helm_application_operation_total",
Help: "Counter of app creation and deletion",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"verb", "name", "appstore"},
)
metricsList = []compbasemetrics.Registerable{
appOperationTotal,
}
)
func registerMetrics() {
for _, m := range metricsList {
metrics.MustRegister(m)
}
}
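// Usage sketch (hedged; `appName` is a hypothetical value supplied by the
// caller): once registerMetrics has run, an operation is counted by fixing the
// verb, name and appstore label values:
//
//    appOperationTotal.WithLabelValues("create", appName, "true").Inc()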


@@ -1,329 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmcategory
import (
"context"
"time"
ctrl "sigs.k8s.io/controller-runtime"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
const (
HelmCategoryFinalizer = "helmcategories.application.kubesphere.io"
)
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
c, err := controller.New("helm-category-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to HelmCategory
err = c.Watch(&source.Kind{Type: &v1alpha1.HelmCategory{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
reconcileObj := r.(*ReconcileHelmCategory)
// Watch for changes to HelmApplication
err = c.Watch(&source.Kind{Type: &v1alpha1.HelmApplication{}}, &handler.Funcs{
CreateFunc: func(event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
app := event.Object.(*v1alpha1.HelmApplication)
err := reconcileObj.updateUncategorizedApplicationLabels(app)
if err != nil {
limitingInterface.AddAfter(event, 20*time.Second)
return
}
repoId := app.GetHelmRepoId()
if repoId == v1alpha1.AppStoreRepoId {
ctgId := app.GetHelmCategoryId()
if ctgId == "" {
ctgId = v1alpha1.UncategorizedId
}
err := reconcileObj.updateCategoryCount(ctgId)
if err != nil {
klog.Errorf("reconcile category %s failed, error: %s", ctgId, err)
}
}
},
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
oldApp := updateEvent.ObjectOld.(*v1alpha1.HelmApplication)
newApp := updateEvent.ObjectNew.(*v1alpha1.HelmApplication)
err := reconcileObj.updateUncategorizedApplicationLabels(newApp)
if err != nil {
limitingInterface.AddAfter(updateEvent, 20*time.Second)
return
}
var oldId string
repoId := newApp.GetHelmRepoId()
if repoId == v1alpha1.AppStoreRepoId {
oldId = oldApp.GetHelmCategoryId()
if oldId == "" {
oldId = v1alpha1.UncategorizedId
}
err := reconcileObj.updateCategoryCount(oldId)
if err != nil {
klog.Errorf("reconcile category %s failed, error: %s", oldId, err)
}
}
// new labels and new repo id
repoId = newApp.GetHelmRepoId()
if repoId == v1alpha1.AppStoreRepoId {
// new category id
newId := newApp.GetHelmCategoryId()
if newId == "" {
newId = v1alpha1.UncategorizedId
}
if oldId != newId {
err := reconcileObj.updateCategoryCount(newId)
if err != nil {
klog.Errorf("reconcile category %s failed, error: %s", newId, err)
}
}
}
},
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
app := deleteEvent.Object.(*v1alpha1.HelmApplication)
repoId := app.GetHelmRepoId()
if repoId == v1alpha1.AppStoreRepoId {
id := app.GetHelmCategoryId()
if id == "" {
id = v1alpha1.UncategorizedId
}
err := reconcileObj.updateCategoryCount(id)
if err != nil {
klog.Errorf("reconcile category %s failed, error: %s", id, err)
}
}
},
})
if err != nil {
return err
}
go func() {
// create Uncategorized object
ticker := time.NewTicker(15 * time.Second)
defer ticker.Stop()
for range ticker.C {
ctg := &v1alpha1.HelmCategory{}
err := reconcileObj.Get(context.TODO(), types.NamespacedName{Name: v1alpha1.UncategorizedId}, ctg)
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("get helm category: %s failed, error: %s", v1alpha1.UncategorizedId, err)
}
if ctg.Name != "" {
// category exists now
return
}
ctg = &v1alpha1.HelmCategory{
ObjectMeta: metav1.ObjectMeta{
Name: v1alpha1.UncategorizedId,
},
Spec: v1alpha1.HelmCategorySpec{
Description: v1alpha1.UncategorizedName,
Name: v1alpha1.UncategorizedName,
},
}
err = reconcileObj.Create(context.TODO(), ctg)
if err != nil {
klog.Errorf("create helm category: %s failed, error: %s", v1alpha1.UncategorizedName, err)
}
}
}()
return nil
}
var _ reconcile.Reconciler = &ReconcileHelmCategory{}
// ReconcileHelmCategory reconciles a HelmCategory object
type ReconcileHelmCategory struct {
client.Client
//Scheme *runtime.Scheme
}
func (r *ReconcileHelmCategory) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
return add(mgr, r)
}
// Reconcile reads the state of the cluster for a helmcategories object and makes changes based on the state read
// and what is in the helmcategories.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories/status,verbs=get;update;patch
func (r *ReconcileHelmCategory) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
klog.V(4).Infof("sync helm category: %s", request.String())
defer func() {
klog.V(4).Infof("sync helm category end: %s, elapsed: %v", request.String(), time.Since(start))
}()
instance := &v1alpha1.HelmCategory{}
err := r.Client.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
if request.Name == v1alpha1.UncategorizedId {
err = r.ensureUncategorizedCategory()
// If creating the uncategorized category failed, return the error so the request is requeued and creation is retried
return reconcile.Result{}, err
}
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer) {
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleted
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer) {
// our finalizer is present, so lets handle our external dependency
// remove our finalizer from the list and update it.
if instance.Status.Total > 0 {
klog.Errorf("can not delete helm category: %s which owns applications", request.String())
return reconcile.Result{}, nil
}
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
return item == HelmCategoryFinalizer
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
err = r.updateCategoryCount(instance.Name)
if err != nil {
klog.Errorf("update helm category: %s status failed, error: %s", instance.Name, err)
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func (r *ReconcileHelmCategory) ensureUncategorizedCategory() error {
ctg := &v1alpha1.HelmCategory{}
err := r.Get(context.TODO(), types.NamespacedName{Name: v1alpha1.UncategorizedId}, ctg)
if err == nil {
// the category already exists, nothing to create
return nil
}
if !errors.IsNotFound(err) {
return err
}
ctg.Name = v1alpha1.UncategorizedId
ctg.Spec.Name = v1alpha1.UncategorizedName
ctg.Spec.Description = v1alpha1.UncategorizedName
err = r.Create(context.TODO(), ctg)
return err
}
func (r *ReconcileHelmCategory) updateCategoryCount(id string) error {
ctg := &v1alpha1.HelmCategory{}
err := r.Get(context.TODO(), types.NamespacedName{Name: id}, ctg)
if err != nil {
return err
}
count, err := r.countApplications(id)
if err != nil {
return err
}
if ctg.Status.Total == count {
return nil
}
ctg.Status.Total = count
err = r.Status().Update(context.TODO(), ctg)
return err
}
func (r *ReconcileHelmCategory) countApplications(id string) (int, error) {
list := v1alpha1.HelmApplicationList{}
err := r.List(context.TODO(), &list, client.MatchingLabels{
constants.CategoryIdLabelKey: id,
constants.ChartRepoIdLabelKey: v1alpha1.AppStoreRepoId,
})
if err != nil {
return 0, err
}
count := 0
// just count active helm application
for _, app := range list.Items {
if app.Status.State == v1alpha1.StateActive {
count += 1
}
}
return count, nil
}
// updateUncategorizedApplicationLabels adds the uncategorized category id to app-store applications that have no category label yet
func (r *ReconcileHelmCategory) updateUncategorizedApplicationLabels(app *v1alpha1.HelmApplication) error {
if app == nil {
return nil
}
if app.GetHelmRepoId() == v1alpha1.AppStoreRepoId && app.GetHelmCategoryId() == "" {
appCopy := app.DeepCopy()
// guard against writing to a nil label map
if appCopy.Labels == nil {
appCopy.Labels = make(map[string]string, 1)
}
appCopy.Labels[constants.CategoryIdLabelKey] = v1alpha1.UncategorizedId
patch := client.MergeFrom(app)
err := r.Client.Patch(context.TODO(), appCopy, patch)
if err != nil {
klog.Errorf("patch application: %s failed, error: %s", app.Name, err)
return err
}
}
return nil
}
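// The patch above is the standard controller-runtime merge-patch idiom. A
// minimal sketch of the same pattern (the label key/value are hypothetical):
//
//    orig := app.DeepCopy()                      // snapshot the current state
//    app.Labels["example-key"] = "example-value" // mutate the live object
//    err := c.Patch(ctx, app, client.MergeFrom(orig)) // only the delta is sent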


@@ -1,106 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmcategory
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/onsi/gomega/gexec"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
"kubesphere.io/kubesphere/pkg/controller/openpitrix/helmapplication"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
func TestHelmCategoryController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "HelmCategory Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
t := true
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "ks-core", "crds")},
AttachControlPlaneOutput: false,
}
}
cfg, err := testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
MetricsBindAddress: "0",
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&ReconcileHelmCategory{}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
err = (&helmapplication.ReconcileHelmApplication{}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
err = (&helmapplication.ReconcileHelmApplicationVersion{}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
gexec.KillAndWait(5 * time.Second)
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
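// Usage note (derived from BeforeSuite above): by default the suite boots a
// local envtest control plane from the CRD directory; setting the environment
// variable TEST_USE_EXISTING_CLUSTER=true runs it against the cluster from the
// current kubeconfig instead.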


@@ -1,135 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmcategory
import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/idutils"
)
var _ = Describe("helmCategory", func() {
const timeout = time.Second * 240
const interval = time.Second * 1
app := createApp()
appVer := createAppVersion(app.GetHelmApplicationId())
ctg := createCtg()
BeforeEach(func() {
err := k8sClient.Create(context.Background(), app)
Expect(err).NotTo(HaveOccurred())
err = k8sClient.Create(context.Background(), appVer)
Expect(err).NotTo(HaveOccurred())
err = k8sClient.Create(context.Background(), ctg)
Expect(err).NotTo(HaveOccurred())
})
Context("Helm category Controller", func() {
It("Should success", func() {
key := types.NamespacedName{
Name: v1alpha1.UncategorizedId,
}
By("Expecting category should exists")
Eventually(func() bool {
f := &v1alpha1.HelmCategory{}
k8sClient.Get(context.Background(), key, f)
return !f.CreationTimestamp.IsZero()
}, timeout, interval).Should(BeTrue())
By("Update helm app version status")
Eventually(func() bool {
k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer.Name}, appVer)
appVer.Status = v1alpha1.HelmApplicationVersionStatus{
State: v1alpha1.StateActive,
}
err := k8sClient.Status().Update(context.Background(), appVer)
return err == nil
}, timeout, interval).Should(BeTrue())
By("Wait for app status become active")
Eventually(func() bool {
appKey := types.NamespacedName{
Name: app.Name,
}
k8sClient.Get(context.Background(), appKey, app)
return app.State() == v1alpha1.StateActive
}, timeout, interval).Should(BeTrue())
By("Reconcile for `uncategorized` category")
Eventually(func() bool {
key := types.NamespacedName{Name: v1alpha1.UncategorizedId}
ctg := v1alpha1.HelmCategory{}
k8sClient.Get(context.Background(), key, &ctg)
return ctg.Status.Total == 1
}, timeout, interval).Should(BeTrue())
})
})
})
func createCtg() *v1alpha1.HelmCategory {
return &v1alpha1.HelmCategory{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmCategoryIdPrefix),
},
Spec: v1alpha1.HelmCategorySpec{
Name: "dummy-ctg",
},
}
}
func createApp() *v1alpha1.HelmApplication {
return &v1alpha1.HelmApplication{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix),
},
Spec: v1alpha1.HelmApplicationSpec{
Name: "dummy-chart",
},
}
}
func createAppVersion(appId string) *v1alpha1.HelmApplicationVersion {
return &v1alpha1.HelmApplicationVersion{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
Labels: map[string]string{
constants.ChartApplicationIdLabelKey: appId,
},
},
Spec: v1alpha1.HelmApplicationVersionSpec{
Metadata: &v1alpha1.Metadata{
Version: "0.0.1",
Name: "dummy-chart",
},
},
}
}


@@ -1,81 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"path"
"strings"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
)
func (r *ReconcileHelmRelease) GetChartData(rls *v1alpha1.HelmRelease) (chartName string, chartData []byte, err error) {
if rls.Spec.RepoId != "" && rls.Spec.RepoId != v1alpha1.AppStoreRepoId {
// load chart data from helm repo
repo := v1alpha1.HelmRepo{}
err := r.Get(context.TODO(), types.NamespacedName{Name: rls.Spec.RepoId}, &repo)
if err != nil {
klog.Errorf("get helm repo %s failed, error: %v", rls.Spec.RepoId, err)
return chartName, chartData, ErrGetRepoFailed
}
index, err := helmrepoindex.ByteArrayToSavedIndex([]byte(repo.Status.Data))
if err != nil {
klog.Errorf("parse index of repo %s failed, error: %v", rls.Spec.RepoId, err)
return chartName, chartData, ErrGetRepoFailed
}
if version := index.GetApplicationVersion(rls.Spec.ApplicationId, rls.Spec.ApplicationVersionId); version != nil {
if len(version.Spec.URLs) == 0 {
klog.Errorf("app version %s has no chart url", rls.Spec.ApplicationVersionId)
return chartName, chartData, ErrLoadChartFailed
}
url := version.Spec.URLs[0]
if !(strings.HasPrefix(url, "https://") || strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "s3://")) {
url = repo.Spec.Url + "/" + url
}
buf, err := helmrepoindex.LoadChart(context.TODO(), url, &repo.Spec.Credential)
if err != nil {
klog.Infof("load chart failed, error: %s", err)
return chartName, chartData, ErrLoadChartFailed
}
chartData = buf.Bytes()
chartName = version.Name
} else {
klog.Errorf("get app version: %s failed", rls.Spec.ApplicationVersionId)
return chartName, chartData, ErrGetAppVersionFailed
}
} else {
// load chart data from helm application version
appVersion := &v1alpha1.HelmApplicationVersion{}
err = r.Get(context.TODO(), types.NamespacedName{Name: rls.Spec.ApplicationVersionId}, appVersion)
if err != nil {
klog.Errorf("get app version %s failed, error: %v", rls.Spec.ApplicationVersionId, err)
return chartName, chartData, ErrGetAppVersionFailed
}
if r.StorageClient == nil {
return "", nil, ErrS3Config
}
chartData, err = r.StorageClient.Read(path.Join(appVersion.GetWorkspace(), appVersion.Name))
if err != nil {
klog.Errorf("load chart from storage failed, error: %s", err)
return chartName, chartData, ErrLoadChartFromStorageFailed
}
chartName = appVersion.GetTrueName()
}
return
}
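// Worked example of the URL resolution above (URLs are hypothetical): index
// entries may carry absolute or relative chart URLs.
//
//    repo.Spec.Url == "https://charts.example.com"
//    entry URL "mychart-0.1.0.tgz"             -> "https://charts.example.com/mychart-0.1.0.tgz"
//    entry URL "s3://bucket/mychart-0.1.0.tgz" -> used as-is (absolute)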


@@ -1,429 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helmrelease
import (
"context"
"errors"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/flowcontrol"
"sigs.k8s.io/controller-runtime/pkg/controller"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/api/application/v1alpha1"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
)
const (
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
MaxBackoffTime = 15 * time.Minute
)
var (
ErrGetRepoFailed = errors.New("get repo failed")
ErrGetAppFailed = errors.New("get app failed")
ErrAppVersionDataIsEmpty = errors.New("app version data is empty")
ErrGetAppVersionFailed = errors.New("get app version failed")
ErrLoadChartFailed = errors.New("load chart failed")
ErrS3Config = errors.New("invalid s3 config")
ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)
var _ reconcile.Reconciler = &ReconcileHelmRelease{}
// ReconcileHelmRelease reconciles a HelmRelease object
type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
// mock helm install && uninstall
helmMock bool
checkReleaseStatusBackoff *flowcontrol.Backoff
clusterClients clusterclient.ClusterClients
MultiClusterEnable bool
MaxConcurrent int
// wait time when checking whether a release is ready
WaitTime time.Duration
StopChan <-chan struct{}
}
// State machine:
//
//    creating -> created -> active -> deleting -> deleted
//    created/upgraded -> active       (once the release resources are ready)
//    active -> upgrading -> upgraded  (when spec.Version changes)
//    creating/upgrading -> failed     (on install/upgrade error)
//    failed -> upgrading              (retried when spec.Version changes)
//
// Reconcile reads the state of the cluster for a helmreleases object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if instance.Status.State == "" {
instance.Status.State = v1alpha1.HelmStatusCreating
instance.Status.LastUpdate = metav1.Now()
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
}
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
clusterName := instance.GetRlsCluster()
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
// the cluster no longer exists, delete the helm release object
klog.Warningf("cluster %s not found, delete the helm release %s/%s",
clusterName, instance.GetRlsNamespace(), instance.GetTrueName())
return reconcile.Result{}, r.Delete(context.TODO(), instance)
}
// The host cluster is self-healing; deleting the host cluster won't cause deletion of the helm release
if !r.clusterClients.IsHostCluster(clusterInfo) {
// add owner References
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1alpha1.SchemeGroupVersion.String(),
Kind: clusterv1alpha1.ResourceKindCluster,
Name: clusterInfo.Name,
UID: clusterInfo.UID,
})
}
}
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
} else {
// The object is being deleted
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
err := r.uninstallHelmRelease(instance)
if err != nil {
return reconcile.Result{}, err
}
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
return item == HelmReleaseFinalizer
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
return r.reconcile(instance)
}
// Check the state of the instance then decide what to do.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {
var err error
switch instance.Status.State {
case v1alpha1.HelmStatusDeleting:
// no operation
return reconcile.Result{}, nil
case v1alpha1.HelmStatusFailed:
// The release failed previously; when instance.Status.Version no longer matches instance.Spec.Version, retry with an upgrade
if instance.Status.Version > 0 && instance.Status.Version != instance.Spec.Version {
return r.createOrUpgradeHelmRelease(instance, true)
} else {
return reconcile.Result{}, nil
}
case v1alpha1.HelmStatusActive:
// The release was active; when instance.Status.Version no longer matches instance.Spec.Version, an upgrade is pending
if instance.Status.Version != instance.Spec.Version {
instance.Status.State = v1alpha1.HelmStatusUpgrading
// Update the state first.
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
} else {
return reconcile.Result{}, nil
}
case v1alpha1.HelmStatusCreating:
// create new release
return r.createOrUpgradeHelmRelease(instance, false)
case v1alpha1.HelmStatusUpgrading:
// We can update the release now.
return r.createOrUpgradeHelmRelease(instance, true)
case v1alpha1.HelmStatusCreated, v1alpha1.HelmStatusUpgraded:
if instance.Status.Version != instance.Spec.Version {
// Start a new backoff.
r.checkReleaseStatusBackoff.DeleteEntry(rlsBackoffKey(instance))
instance.Status.State = v1alpha1.HelmStatusUpgrading
err = r.Status().Update(context.TODO(), instance)
return reconcile.Result{}, err
} else {
retry, err := r.checkReleaseIsReady(instance)
return reconcile.Result{RequeueAfter: retry}, err
}
case v1alpha1.HelmStatusRollbacking:
// TODO: rollback helm release
return reconcile.Result{}, nil
}
return reconcile.Result{}, nil
}
func rlsBackoffKey(rls *v1alpha1.HelmRelease) string {
return rls.Name
}
// doCheck checks whether the helm release's resources are ready.
func (r *ReconcileHelmRelease) doCheck(rls *v1alpha1.HelmRelease) (retryAfter time.Duration, err error) {
backoffKey := rlsBackoffKey(rls)
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return
}
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
helmwrapper.SetMock(r.helmMock))
ready, err := hw.IsReleaseReady(r.WaitTime)
if err != nil {
// release resources not ready
klog.Errorf("check release %s/%s status failed, error: %s", rls.GetRlsNamespace(), rls.GetTrueName(), err)
// check status next time
r.checkReleaseStatusBackoff.Next(backoffKey, r.checkReleaseStatusBackoff.Clock.Now())
retryAfter = r.checkReleaseStatusBackoff.Get(backoffKey)
err := r.updateStatus(rls, rls.Status.State, err.Error())
return retryAfter, err
} else {
klog.V(4).Infof("check release %s/%s status success, ready: %v", rls.GetRlsNamespace(), rls.GetTrueName(), ready)
// install or upgrade succeeded, remove the release from the backoff queue.
r.checkReleaseStatusBackoff.DeleteEntry(backoffKey)
// Release resources are ready, it's active now.
err := r.updateStatus(rls, v1alpha1.HelmStatusActive, "")
// If update status failed, the controller need update the status next time.
return 0, err
}
}
// checkReleaseIsReady checks whether the helm release's resources are ready.
// If retryAfter > 0, the controller will recheck the release after that duration.
func (r *ReconcileHelmRelease) checkReleaseIsReady(rls *v1alpha1.HelmRelease) (retryAfter time.Duration, err error) {
backoffKey := rlsBackoffKey(rls)
now := time.Now()
if rls.Status.LastDeployed != nil && now.Sub(rls.Status.LastDeployed.Time) > MaxBackoffTime {
klog.V(2).Infof("check release %s/%s too many times, ignore it", rls.GetRlsNamespace(), rls.GetTrueName())
r.checkReleaseStatusBackoff.DeleteEntry(backoffKey)
return 0, nil
}
if !r.checkReleaseStatusBackoff.IsInBackOffSinceUpdate(backoffKey, r.checkReleaseStatusBackoff.Clock.Now()) {
klog.V(4).Infof("start to check release %s/%s status ", rls.GetRlsNamespace(), rls.GetTrueName())
return r.doCheck(rls)
} else {
// backoff, check next time
retryAfter := r.checkReleaseStatusBackoff.Get(backoffKey)
klog.V(4).Infof("check release %s/%s status has been limited by backoff - %v remaining",
rls.GetRlsNamespace(), rls.GetTrueName(), retryAfter)
return retryAfter, nil
}
}
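// The backoff pattern above, sketched in isolation (the key is hypothetical;
// the API is k8s.io/client-go/util/flowcontrol, as imported by this file):
//
//    backoff := flowcontrol.NewBackOff(2*time.Second, MaxBackoffTime)
//    key := "my-release"
//    if !backoff.IsInBackOffSinceUpdate(key, backoff.Clock.Now()) {
//        backoff.Next(key, backoff.Clock.Now()) // grow the delay: 2s, 4s, 8s, ...
//        retryAfter := backoff.Get(key)         // requeue after this duration
//        _ = retryAfter
//    }
//    backoff.DeleteEntry(key) // reset once the release is ready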
func (r *ReconcileHelmRelease) updateStatus(rls *v1alpha1.HelmRelease, currentState, msg string) error {
now := metav1.Now()
var deployStatus v1alpha1.HelmReleaseDeployStatus
rls.Status.Message = stringutils.ShortenString(msg, v1alpha1.MsgLen)
deployStatus.Message = stringutils.ShortenString(msg, v1alpha1.MsgLen)
deployStatus.State = currentState
deployStatus.Time = now
if rls.Status.State != currentState &&
(currentState == v1alpha1.HelmStatusCreated || currentState == v1alpha1.HelmStatusUpgraded) {
rls.Status.Version = rls.Spec.Version
rls.Status.LastDeployed = &now
}
rls.Status.State = currentState
// record the new state
rls.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, rls.Status.DeployStatus...)
if len(rls.Status.DeployStatus) > 10 {
rls.Status.DeployStatus = rls.Status.DeployStatus[:10:10]
}
rls.Status.LastUpdate = now
err := r.Status().Update(context.TODO(), rls)
return err
}
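// Note on the three-index slice above: DeployStatus[:10:10] caps both length
// and capacity at 10, so a later append allocates a fresh backing array
// instead of growing (and keeping alive) the old one. A minimal sketch:
//
//    s := make([]v1alpha1.HelmReleaseDeployStatus, 12)
//    t := s[:10:10]                                    // len 10, cap 10
//    t = append(t, v1alpha1.HelmReleaseDeployStatus{}) // reallocates; s is untouched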
// createOrUpgradeHelmRelease will run helm install to install a new release if upgrade is false,
// run helm upgrade if upgrade is true
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) (reconcile.Result, error) {
// Install or upgrade release
var chartData []byte
var err error
_, chartData, err = r.GetChartData(rls)
if err != nil {
return reconcile.Result{}, err
}
if len(chartData) == 0 {
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
return reconcile.Result{}, ErrAppVersionDataIsEmpty
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
if r.MultiClusterEnable && clusterName != "" {
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
if err != nil {
klog.Errorf("get cluster %s config failed", clusterConfig)
return reconcile.Result{}, err
}
}
// If clusterConfig is empty, the application will be installed in the current (host) cluster.
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name,
helmwrapper.SetAnnotations(map[string]string{constants.CreatorAnnotationKey: rls.GetCreator()}),
helmwrapper.SetLabels(map[string]string{
v1alpha1.ApplicationInstance: rls.GetTrueName(),
}),
helmwrapper.SetMock(r.helmMock))
var currentState string
if upgrade {
err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
currentState = v1alpha1.HelmStatusUpgraded
} else {
err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
currentState = v1alpha1.HelmStatusCreated
}
var msg string
if err != nil {
// install or upgrade failed
currentState = v1alpha1.HelmStatusFailed
msg = err.Error()
}
err = r.updateStatus(rls, currentState, msg)
return reconcile.Result{}, err
}
func (r *ReconcileHelmRelease) uninstallHelmRelease(rls *v1alpha1.HelmRelease) error {
if rls.Status.State != v1alpha1.HelmStatusDeleting {
rls.Status.State = v1alpha1.HelmStatusDeleting
rls.Status.LastUpdate = metav1.Now()
err := r.Status().Update(context.TODO(), rls)
if err != nil {
return err
}
}
clusterName := rls.GetRlsCluster()
var clusterConfig string
var err error
if r.MultiClusterEnable && clusterName != "" {
clusterInfo, err := r.clusterClients.Get(clusterName)
if err != nil {
klog.V(2).Infof("cluster %s was deleted, skip helm release uninstall", clusterName)
return nil
}
// If the user deletes the HelmRelease first and then deletes the cluster immediately, helm resources may leak.
if clusterInfo.DeletionTimestamp != nil {
klog.V(2).Infof("cluster %s is deleting, skip helm release uninstall", clusterName)
return nil
}
clusterConfig = string(clusterInfo.Spec.Connection.KubeConfig)
}
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
err = hw.Uninstall()
return err
}
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
r.Client = mgr.GetClient()
if r.KsFactory != nil && r.MultiClusterEnable {
r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())
}
// exponential backoff
r.checkReleaseStatusBackoff = flowcontrol.NewBackOff(2*time.Second, MaxBackoffTime)
go wait.Until(r.checkReleaseStatusBackoff.GC, 1*time.Minute, r.StopChan)
return ctrl.NewControllerManagedBy(mgr).
WithOptions(controller.Options{MaxConcurrentReconciles: r.MaxConcurrent}).
For(&v1alpha1.HelmRelease{}).
Complete(r)
}
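// Construction sketch (the field values and `stopCh`/`mgr` are illustrative
// assumptions, not defaults from this code base):
//
//    r := &ReconcileHelmRelease{
//        MultiClusterEnable: true,
//        MaxConcurrent:      4,                // reconcile workers
//        WaitTime:           30 * time.Second, // readiness-check wait
//        StopChan:           stopCh,
//    }
//    if err := r.SetupWithManager(mgr); err != nil {
//        klog.Fatalf("setup helm release controller failed: %v", err)
//    }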

Some files were not shown because too many files have changed in this diff.