feat: kubesphere 4.0 (#6115)
* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
parent b5015ec7b9
commit 447a51f08b
@@ -1,238 +1,113 @@
 /*
 Copyright 2019 The KubeSphere Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
+ * Please refer to the LICENSE file in the root directory of the project.
+ * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
+ */
 
 package clusterrolebinding
 
 import (
     "context"
     "fmt"
-    "time"
 
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
-    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-    "k8s.io/apimachinery/pkg/util/wait"
-    appsv1informers "k8s.io/client-go/informers/apps/v1"
-    coreinfomers "k8s.io/client-go/informers/core/v1"
-    rbacv1informers "k8s.io/client-go/informers/rbac/v1"
-    "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/kubernetes/scheme"
-    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-    rbacv1listers "k8s.io/client-go/listers/rbac/v1"
-    "k8s.io/client-go/tools/cache"
+    "github.com/go-logr/logr"
     rbacv1 "k8s.io/api/rbac/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/record"
-    "k8s.io/client-go/util/workqueue"
-    "k8s.io/klog/v2"
+    iamv1beta1 "kubesphere.io/api/iam/v1beta1"
+    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
 
-    iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
-
-    iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
-    "kubesphere.io/kubesphere/pkg/models/kubectl"
+    rbachelper "kubesphere.io/kubesphere/pkg/componenthelper/auth/rbac"
+    kscontroller "kubesphere.io/kubesphere/pkg/controller"
+    rbacutils "kubesphere.io/kubesphere/pkg/utils/rbac"
 )
 
 const (
-    // SuccessSynced is used as part of the Event 'reason' when a Foo is synced
-    successSynced = "Synced"
-    // is synced successfully
-    messageResourceSynced = "ClusterRoleBinding synced successfully"
-    controllerName        = "clusterrolebinding-controller"
+    controllerName = "clusterrolebinding"
+    roleBindingRef = "iam.kubesphere.io/clusterrolebinding-ref"
 )
 
-type Controller struct {
-    k8sClient                  kubernetes.Interface
-    clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer
-    clusterRoleBindingLister   rbacv1listers.ClusterRoleBindingLister
-    clusterRoleBindingSynced   cache.InformerSynced
-    userSynced                 cache.InformerSynced
-    deploymentSynced           cache.InformerSynced
-    podSynced                  cache.InformerSynced
-    // workqueue is a rate limited work queue. This is used to queue work to be
-    // processed instead of performing it as soon as a change happens. This
-    // means we can ensure we only process a fixed amount of resources at a
-    // time, and makes it easy to ensure we are never processing the same item
-    // simultaneously in two different workers.
-    workqueue workqueue.RateLimitingInterface
-    // recorder is an event recorder for recording Event resources to the
-    // Kubernetes API.
-    recorder        record.EventRecorder
-    kubectlOperator kubectl.Interface
+var _ kscontroller.Controller = &Reconciler{}
+var _ reconcile.Reconciler = &Reconciler{}
+
+type Reconciler struct {
+    client.Client
+    logger   logr.Logger
+    recorder record.EventRecorder
+    helper   *rbachelper.Helper
 }
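
The two guard assignments added above are compile-time checks, not runtime state: the build breaks if *Reconciler ever stops satisfying kscontroller.Controller or reconcile.Reconciler. A minimal self-contained sketch of the idiom, with illustrative names that are not from this commit:

package main

import "context"

// Stand-in for an interface such as reconcile.Reconciler.
type Syncer interface {
    Sync(ctx context.Context) error
}

type demo struct{}

func (d *demo) Sync(ctx context.Context) error { return nil }

// Guard: removing the Sync method above turns this line into a build error.
var _ Syncer = &demo{}

func main() {}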
 
-func NewController(k8sClient kubernetes.Interface, clusterRoleBindingInformer rbacv1informers.ClusterRoleBindingInformer,
-    deploymentInformer appsv1informers.DeploymentInformer, podInformer coreinfomers.PodInformer,
-    userInformer iamv1alpha2informers.UserInformer, kubectlImage string) *Controller {
-    // Create event broadcaster
-    // Add sample-controller types to the default Kubernetes Scheme so Events can be
-    // logged for sample-controller types.
-
-    klog.V(4).Info("Creating event broadcaster")
-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(klog.Infof)
-    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
-    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
-    ctl := &Controller{
-        k8sClient:                  k8sClient,
-        clusterRoleBindingInformer: clusterRoleBindingInformer,
-        clusterRoleBindingLister:   clusterRoleBindingInformer.Lister(),
-        clusterRoleBindingSynced:   clusterRoleBindingInformer.Informer().HasSynced,
-        userSynced:                 userInformer.Informer().HasSynced,
-        deploymentSynced:           deploymentInformer.Informer().HasSynced,
-        podSynced:                  podInformer.Informer().HasSynced,
-        kubectlOperator:            kubectl.NewOperator(k8sClient, deploymentInformer, podInformer, userInformer, kubectlImage),
-        workqueue:                  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleBinding"),
-        recorder:                   recorder,
-    }
-    klog.Info("Setting up event handlers")
-    clusterRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-        AddFunc: ctl.enqueueClusterRoleBinding,
-        UpdateFunc: func(old, new interface{}) {
-            ctl.enqueueClusterRoleBinding(new)
-        },
-        DeleteFunc: ctl.enqueueClusterRoleBinding,
-    })
-    return ctl
+func (r *Reconciler) Name() string {
+    return controllerName
 }
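
Name() satisfies KubeSphere's own controller registry rather than anything in controller-runtime. Judging only from the two methods this file implements, the kscontroller.Controller interface plausibly has the shape sketched below; this is an inference, and the authoritative definition in kubesphere.io/kubesphere/pkg/controller may carry more methods:

// Inferred shape, not the verbatim KubeSphere definition:
type Controller interface {
    Name() string
    SetupWithManager(mgr *Manager) error
}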
 
-func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
-    defer utilruntime.HandleCrash()
-    defer c.workqueue.ShutDown()
+func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
+    r.logger = ctrl.Log.WithName("controllers").WithName(controllerName)
+    r.recorder = mgr.GetEventRecorderFor(controllerName)
+    r.Client = mgr.GetClient()
+    r.helper = rbachelper.NewHelper(mgr.GetClient())
 
-    // Start the informer factories to begin populating the informer caches
-    klog.Info("Starting ClusterRoleBinding controller")
-
-    // Wait for the caches to be synced before starting workers
-    klog.Info("Waiting for informer caches to sync")
-    if ok := cache.WaitForCacheSync(stopCh, c.clusterRoleBindingSynced, c.userSynced, c.deploymentSynced, c.podSynced); !ok {
-        return fmt.Errorf("failed to wait for caches to sync")
-    }
-
-    klog.Info("Starting workers")
-
-    for i := 0; i < threadiness; i++ {
-        go wait.Until(c.runWorker, time.Second, stopCh)
-    }
-
-    klog.Info("Started workers")
-    <-stopCh
-    klog.Info("Shutting down workers")
-    return nil
+    return ctrl.NewControllerManagedBy(mgr).
+        Named(controllerName).
+        For(&iamv1beta1.ClusterRoleBinding{}).
+        Complete(r)
 }
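
SetupWithManager replaces the hand-rolled wiring deleted above: For(&iamv1beta1.ClusterRoleBinding{}) registers the watch that AddEventHandler used to configure, while the manager owns the caches, the event broadcaster, and the cache syncing that Run performed manually. A generic, self-contained controller-runtime wiring sketch, using a plain ctrl.Manager and illustrative names rather than KubeSphere's wrapper:

package main

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

type demoReconciler struct{ client.Client }

func (r *demoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    // Watch events arrive here as namespace/name requests; the cached
    // client reads the current object state.
    return ctrl.Result{}, nil
}

func main() {
    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
    if err != nil {
        panic(err)
    }
    // For() sets up the watch and event handlers the old code built by hand.
    if err := ctrl.NewControllerManagedBy(mgr).
        For(&corev1.ConfigMap{}).
        Complete(&demoReconciler{Client: mgr.GetClient()}); err != nil {
        panic(err)
    }
    // Start blocks: it syncs caches, then runs workers until shutdown,
    // covering what Run(threadiness, stopCh) used to do.
    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        panic(err)
    }
}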
 
-func (c *Controller) enqueueClusterRoleBinding(obj interface{}) {
-    var key string
-    var err error
-    if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
-        utilruntime.HandleError(err)
-        return
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+    log := r.logger.WithValues("ClusterRoleBinding", req.String())
+    ctx = logr.NewContext(ctx, log)
+    clusterRole := &iamv1beta1.ClusterRoleBinding{}
+    if err := r.Get(ctx, req.NamespacedName, clusterRole); err != nil {
+        return ctrl.Result{}, client.IgnoreNotFound(err)
     }
-    c.workqueue.Add(key)
 
+    if err := r.syncToKubernetes(ctx, clusterRole); err != nil {
+        log.Error(err, "sync cluster role binding failed")
+        return ctrl.Result{}, err
+    }
+    return ctrl.Result{}, nil
 }
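
Reconcile's signature now carries the retry behavior the old workqueue plumbing spelled out by hand: a non-nil error re-enqueues the request with rate-limited backoff (the old AddRateLimited path), while client.IgnoreNotFound turns the object-already-deleted race into a clean stop instead of an endless retry. The standard controller-runtime return conventions, wrapped in a small helper for reference; this is illustrative, not code from this commit:

import (
    "time"

    ctrl "sigs.k8s.io/controller-runtime"
)

func outcome(transientErr error, recheck bool) (ctrl.Result, error) {
    if transientErr != nil {
        return ctrl.Result{}, transientErr // requeued with rate-limited backoff
    }
    if recheck {
        return ctrl.Result{RequeueAfter: time.Minute}, nil // timed requeue
    }
    return ctrl.Result{}, nil // done until the next watch event
}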
 
-func (c *Controller) runWorker() {
-    for c.processNextWorkItem() {
-    }
-}
-
-func (c *Controller) processNextWorkItem() bool {
-    obj, shutdown := c.workqueue.Get()
-
-    if shutdown {
-        return false
+func (r *Reconciler) syncToKubernetes(ctx context.Context, clusterRoleBinding *iamv1beta1.ClusterRoleBinding) error {
+    k8sClusterRolBinding := &rbacv1.ClusterRoleBinding{
+        ObjectMeta: metav1.ObjectMeta{Name: rbacutils.RelatedK8sResourceName(clusterRoleBinding.Name)},
     }
 
-    // We wrap this block in a func so we can defer c.workqueue.Done.
-    err := func(obj interface{}) error {
-        // We call Done here so the workqueue knows we have finished
-        // processing this item. We also must remember to call Forget if we
-        // do not want this work item being re-queued. For example, we do
-        // not call Forget if a transient error occurs, instead the item is
-        // put back on the workqueue and attempted again after a back-off
-        // period.
-        defer c.workqueue.Done(obj)
-        var key string
-        var ok bool
-        // We expect strings to come off the workqueue. These are of the
-        // form namespace/name. We do this as the delayed nature of the
-        // workqueue means the items in the informer cache may actually be
-        // more up to date that when the item was initially put onto the
-        // workqueue.
-        if key, ok = obj.(string); !ok {
-            // As the item in the workqueue is actually invalid, we call
-            // Forget here else we'd go into a loop of attempting to
-            // process a work item that is invalid.
-            c.workqueue.Forget(obj)
-            utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
-            return nil
+    op, err := controllerutil.CreateOrUpdate(ctx, r.Client, k8sClusterRolBinding, func() error {
+        if k8sClusterRolBinding.Labels == nil {
+            k8sClusterRolBinding.Labels = make(map[string]string)
         }
-        // Run the reconcile, passing it the namespace/name string of the
-        // Foo resource to be synced.
-        if err := c.reconcile(key); err != nil {
-            // Put the item back on the workqueue to handle any transient errors.
-            c.workqueue.AddRateLimited(key)
-            return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
+        k8sClusterRolBinding.Labels[roleBindingRef] = clusterRoleBinding.Name
+        k8sClusterRolBinding.RoleRef = rbacv1.RoleRef{
+            APIGroup: rbacv1.GroupName,
+            Kind:     clusterRoleBinding.RoleRef.Kind,
+            Name:     rbacutils.RelatedK8sResourceName(clusterRoleBinding.RoleRef.Name),
         }
-        // Finally, if no error occurs we Forget this item so it does not
-        // get queued again until another change happens.
-        c.workqueue.Forget(obj)
-        klog.Infof("Successfully synced %s:%s", "key", key)
-        return nil
-    }(obj)
-
-    if err != nil {
-        utilruntime.HandleError(err)
-        return true
-    }
-
-    return true
-}
-
-// reconcile compares the actual state with the desired, and attempts to
-// converge the two. It then updates the Status block of the Foo resource
-// with the current status of the resource.
-func (c *Controller) reconcile(key string) error {
-
-    // Get the clusterRoleBinding with this name
-    clusterRoleBinding, err := c.clusterRoleBindingLister.Get(key)
-    if err != nil {
-        // The resource may no longer exist, in which case we stop
-        // processing.
-        if errors.IsNotFound(err) {
-            utilruntime.HandleError(fmt.Errorf("clusterrolebinding '%s' in work queue no longer exists", key))
-            return nil
-        }
-        klog.Error(err)
-        return err
-    }
-
-    if clusterRoleBinding.RoleRef.Name == iamv1alpha2.ClusterAdmin {
+        var subjects []rbacv1.Subject
         for _, subject := range clusterRoleBinding.Subjects {
-            if subject.Kind == iamv1alpha2.ResourceKindUser {
-                err = c.kubectlOperator.CreateKubectlDeploy(subject.Name, clusterRoleBinding)
-                if err != nil {
-                    klog.Error(err)
-                    return err
-                }
+            newSubject := rbacv1.Subject{
+                Kind:      subject.Kind,
+                Name:      subject.Name,
+                Namespace: subject.Namespace,
             }
+            if subject.Kind != rbacv1.ServiceAccountKind {
+                newSubject.APIGroup = rbacv1.GroupName
+            }
+            subjects = append(subjects, newSubject)
         }
+        k8sClusterRolBinding.Subjects = subjects
+        if err := controllerutil.SetOwnerReference(clusterRoleBinding, k8sClusterRolBinding, r.Scheme()); err != nil {
+            return fmt.Errorf("failed to set owner reference: %s", err)
         }
+        return nil
+    })
 
+    if err != nil {
+        r.logger.Error(err, "sync cluster role binding failed", "cluster role binding", clusterRoleBinding.Name)
+    }
 
-    c.recorder.Event(clusterRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
+    r.logger.V(4).Info("sync cluster role binding to K8s", "cluster role binding", clusterRoleBinding.Name, "op", op)
     return nil
 }
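
syncToKubernetes collapses the old get/compare/create/update choreography into a single controllerutil.CreateOrUpdate call: it fetches the object, runs the mutate callback to impose desired state (labels, RoleRef, Subjects), creates or updates only when something actually changed, and reports which case happened in the returned OperationResult (the op value logged above). The roleBindingRef label ties the generated Kubernetes ClusterRoleBinding back to its KubeSphere source, and SetOwnerReference lets garbage collection remove it when the owner goes away. A runnable sketch of the same pattern against controller-runtime's fake client, using a generic ConfigMap and illustrative names rather than code from this commit:

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
    ctx := context.Background()
    c := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()

    cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
    for i := 0; i < 2; i++ {
        op, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
            // Desired state goes here; CreateOrUpdate has already loaded
            // the current object into cm before calling this function.
            if cm.Data == nil {
                cm.Data = map[string]string{}
            }
            cm.Data["rev"] = fmt.Sprint(i)
            return nil
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(op) // first pass: "created"; second pass: "updated"
    }
}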
-
-func (c *Controller) Start(ctx context.Context) error {
-    return c.Run(4, ctx.Done())
-}
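
The deleted Start adapter pinned the old controller to four workqueue workers; under the new scheme the manager starts every registered controller itself. If the same concurrency were wanted, controller-runtime exposes it as a builder option; a hedged sketch of how SetupWithManager could request it (not part of this commit):

// Assumes: import "sigs.k8s.io/controller-runtime/pkg/controller"
return ctrl.NewControllerManagedBy(mgr).
    Named(controllerName).
    For(&iamv1beta1.ClusterRoleBinding{}).
    WithOptions(controller.Options{MaxConcurrentReconciles: 4}).
    Complete(r)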