improve multicluster resource controller
Signed-off-by: hongming <talonwan@yunify.com>
@@ -24,14 +24,12 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/informers"
    certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
    coreinformers "k8s.io/client-go/informers/core/v1"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    certificateslisters "k8s.io/client-go/listers/certificates/v1beta1"
    corelisters "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
@@ -55,11 +53,7 @@ type Controller struct {
    csrInformer certificatesinformers.CertificateSigningRequestInformer
    csrLister   certificateslisters.CertificateSigningRequestLister
    csrSynced   cache.InformerSynced

    cmInformer coreinformers.ConfigMapInformer
    cmLister   corelisters.ConfigMapLister
    cmSynced   cache.InformerSynced

    cmSynced cache.InformerSynced
    // workqueue is a rate limited work queue. This is used to queue work to be
    // processed instead of performing it as soon as a change happens. This
    // means we can ensure we only process a fixed amount of resources at a
@@ -72,7 +66,8 @@ type Controller struct {
    kubeconfigOperator kubeconfig.Interface
}

func NewController(k8sClient kubernetes.Interface, informerFactory informers.SharedInformerFactory, config *rest.Config) *Controller {
func NewController(k8sClient kubernetes.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer,
    configMapInformer corev1informers.ConfigMapInformer, config *rest.Config) *Controller {
    // Create event broadcaster
    // Add sample-controller types to the default Kubernetes Scheme so Events can be
    // logged for sample-controller types.
@@ -82,17 +77,13 @@ func NewController(k8sClient kubernetes.Interface, informerFactory informers.Sha
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
    csrInformer := informerFactory.Certificates().V1beta1().CertificateSigningRequests()
    cmInformer := informerFactory.Core().V1().ConfigMaps()
    ctl := &Controller{
        k8sclient:          k8sClient,
        csrInformer:        csrInformer,
        csrLister:          csrInformer.Lister(),
        csrSynced:          csrInformer.Informer().HasSynced,
        cmInformer:         cmInformer,
        cmLister:           cmInformer.Lister(),
        cmSynced:           cmInformer.Informer().HasSynced,
        kubeconfigOperator: kubeconfig.NewOperator(k8sClient, config, ""),
        cmSynced:           configMapInformer.Informer().HasSynced,
        kubeconfigOperator: kubeconfig.NewOperator(k8sClient, configMapInformer, config),
        workqueue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertificateSigningRequest"),
        recorder:           recorder,
    }
@@ -112,7 +103,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer c.workqueue.ShutDown()

    // Start the csrInformer factories to begin populating the csrInformer caches
    klog.Info("Starting User controller")
    klog.Info("Starting CSR controller")

    // Wait for the caches to be csrSynced before starting workers
    klog.Info("Waiting for csrInformer caches to sync")
@@ -100,10 +100,8 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    //init client

    // Start the informer factories to begin populating the informer caches
    klog.Info("Starting User controller")
    klog.Info("Starting ClusterRoleBinding controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")
pkg/controller/globalrole/globalrole_controller.go (new file, 354 lines)
@@ -0,0 +1,354 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package globalrole

import (
    "encoding/json"
    "fmt"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
    iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
    "kubesphere.io/kubesphere/pkg/constants"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "time"
)

const (
    // successSynced is used as part of the Event 'reason' when a GlobalRole is synced
    successSynced = "Synced"
    // messageResourceSynced is the message used for an Event fired when a GlobalRole
    // is synced successfully
    messageResourceSynced = "GlobalRole synced successfully"
    controllerName        = "globalrole-controller"
)

type Controller struct {
    k8sClient                    kubernetes.Interface
    ksClient                     kubesphere.Interface
    globalRoleInformer           iamv1alpha2informers.GlobalRoleInformer
    globalRoleLister             iamv1alpha2listers.GlobalRoleLister
    globalRoleSynced             cache.InformerSynced
    fedGlobalRoleCache           cache.Store
    fedGlobalRoleCacheController cache.Controller
    // workqueue is a rate limited work queue. This is used to queue work to be
    // processed instead of performing it as soon as a change happens. This
    // means we can ensure we only process a fixed amount of resources at a
    // time, and makes it easy to ensure we are never processing the same item
    // simultaneously in two different workers.
    workqueue workqueue.RateLimitingInterface
    // recorder is an event recorder for recording Event resources to the
    // Kubernetes API.
    recorder record.EventRecorder
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, globalRoleInformer iamv1alpha2informers.GlobalRoleInformer,
    fedGlobalRoleCache cache.Store, fedGlobalRoleCacheController cache.Controller) *Controller {
    // Create event broadcaster
    // Add sample-controller types to the default Kubernetes Scheme so Events can be
    // logged for sample-controller types.

    klog.V(4).Info("Creating event broadcaster")
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
    ctl := &Controller{
        k8sClient:                    k8sClient,
        ksClient:                     ksClient,
        globalRoleInformer:           globalRoleInformer,
        globalRoleLister:             globalRoleInformer.Lister(),
        globalRoleSynced:             globalRoleInformer.Informer().HasSynced,
        fedGlobalRoleCache:           fedGlobalRoleCache,
        fedGlobalRoleCacheController: fedGlobalRoleCacheController,
        workqueue:                    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRole"),
        recorder:                     recorder,
    }
    klog.Info("Setting up event handlers")
    globalRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: ctl.enqueueClusterRole,
        UpdateFunc: func(old, new interface{}) {
            ctl.enqueueClusterRole(new)
        },
        DeleteFunc: ctl.enqueueClusterRole,
    })
    return ctl
}

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    // Start the informer factories to begin populating the informer caches
    klog.Info("Starting GlobalRole controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")

    if ok := cache.WaitForCacheSync(stopCh, c.globalRoleSynced, c.fedGlobalRoleCacheController.HasSynced); !ok {
        return fmt.Errorf("failed to wait for caches to sync")
    }

    klog.Info("Starting workers")
    // Launch workers to process GlobalRole resources
    for i := 0; i < threadiness; i++ {
        go wait.Until(c.runWorker, time.Second, stopCh)
    }

    klog.Info("Started workers")
    <-stopCh
    klog.Info("Shutting down workers")
    return nil
}

func (c *Controller) enqueueClusterRole(obj interface{}) {
    var key string
    var err error
    if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
        utilruntime.HandleError(err)
        return
    }
    c.workqueue.Add(key)
}
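// Note (illustration, not part of this commit): GlobalRole is cluster-scoped,
// so MetaNamespaceKeyFunc yields a bare name here ("admin" rather than
// "namespace/admin"), which is why reconcile below can pass the key straight
// to c.globalRoleLister.Get. A namespaced resource would first split the key:
//
//     namespace, name, err := cache.SplitMetaNamespaceKey(key)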

func (c *Controller) runWorker() {
    for c.processNextWorkItem() {
    }
}

func (c *Controller) processNextWorkItem() bool {
    obj, shutdown := c.workqueue.Get()

    if shutdown {
        return false
    }

    // We wrap this block in a func so we can defer c.workqueue.Done.
    err := func(obj interface{}) error {
        // We call Done here so the workqueue knows we have finished
        // processing this item. We also must remember to call Forget if we
        // do not want this work item being re-queued. For example, we do
        // not call Forget if a transient error occurs, instead the item is
        // put back on the workqueue and attempted again after a back-off
        // period.
        defer c.workqueue.Done(obj)
        var key string
        var ok bool
        // We expect strings to come off the workqueue. These are of the
        // form namespace/name. We do this as the delayed nature of the
        // workqueue means the items in the informer cache may actually be
        // more up to date than when the item was initially put onto the
        // workqueue.
        if key, ok = obj.(string); !ok {
            // As the item in the workqueue is actually invalid, we call
            // Forget here else we'd go into a loop of attempting to
            // process a work item that is invalid.
            c.workqueue.Forget(obj)
            utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
            return nil
        }
        // Run the reconcile, passing it the name of the
        // GlobalRole resource to be synced.
        if err := c.reconcile(key); err != nil {
            // Put the item back on the workqueue to handle any transient errors.
            c.workqueue.AddRateLimited(key)
            return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
        }
        // Finally, if no error occurs we Forget this item so it does not
        // get queued again until another change happens.
        c.workqueue.Forget(obj)
        klog.Infof("Successfully synced %s", key)
        return nil
    }(obj)

    if err != nil {
        utilruntime.HandleError(err)
        return true
    }

    return true
}

// reconcile compares the actual state with the desired state, and attempts to
// converge the two.
func (c *Controller) reconcile(key string) error {

    globalRole, err := c.globalRoleLister.Get(key)
    if err != nil {
        // The GlobalRole may no longer exist, in which case we stop
        // processing.
        if errors.IsNotFound(err) {
            utilruntime.HandleError(fmt.Errorf("globalrole '%s' in work queue no longer exists", key))
            return nil
        }
        klog.Error(err)
        return err
    }

    if err = c.multiClusterSync(globalRole); err != nil {
        klog.Error(err)
        return err
    }

    c.recorder.Event(globalRole, corev1.EventTypeNormal, successSynced, messageResourceSynced)
    return nil
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
    return c.Run(4, stopCh)
}
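// Note (illustration, not part of this commit): Start matches the Runnable
// contract of the controller-runtime release used here
// (Start(<-chan struct{}) error), so the controller can presumably be handed
// to a manager. A minimal sketch, assuming a configured manager.Manager mgr:
//
//     if err := mgr.Add(NewController(k8sClient, ksClient, globalRoleInformer,
//         fedGlobalRoleCache, fedGlobalRoleCacheController)); err != nil {
//         return err
//     }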

func (c *Controller) multiClusterSync(globalRole *iamv1alpha2.GlobalRole) error {

    if err := c.ensureNotControlledByKubefed(globalRole); err != nil {
        klog.Error(err)
        return err
    }

    obj, exist, err := c.fedGlobalRoleCache.GetByKey(globalRole.Name)
    if !exist {
        return c.createFederatedGlobalRole(globalRole)
    }
    if err != nil {
        klog.Error(err)
        return err
    }

    var federatedGlobalRole iamv1alpha2.FederatedRole

    if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRole); err != nil {
        klog.Error(err)
        return err
    }

    if !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Rules, globalRole.Rules) ||
        !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Labels, globalRole.Labels) ||
        !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Annotations, globalRole.Annotations) {

        federatedGlobalRole.Spec.Template.Rules = globalRole.Rules
        federatedGlobalRole.Spec.Template.Annotations = globalRole.Annotations
        federatedGlobalRole.Spec.Template.Labels = globalRole.Labels

        return c.updateFederatedGlobalRole(&federatedGlobalRole)
    }

    return nil
}

func (c *Controller) createFederatedGlobalRole(globalRole *iamv1alpha2.GlobalRole) error {
    federatedGlobalRole := &iamv1alpha2.FederatedRole{
        TypeMeta: metav1.TypeMeta{
            Kind:       iamv1alpha2.FedGlobalRoleKind,
            APIVersion: iamv1alpha2.FedGlobalRoleResource.Group + "/" + iamv1alpha2.FedGlobalRoleResource.Version,
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: globalRole.Name,
        },
        Spec: iamv1alpha2.FederatedRoleSpec{
            Template: iamv1alpha2.RoleTemplate{
                ObjectMeta: metav1.ObjectMeta{
                    Labels:      globalRole.Labels,
                    Annotations: globalRole.Annotations,
                },
                Rules: globalRole.Rules,
            },
            Placement: iamv1alpha2.Placement{
                ClusterSelector: iamv1alpha2.ClusterSelector{},
            },
        },
    }

    err := controllerutil.SetControllerReference(globalRole, federatedGlobalRole, scheme.Scheme)
    if err != nil {
        return err
    }

    data, err := json.Marshal(federatedGlobalRole)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)
    err = cli.RESTClient().Post().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group,
            iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsAlreadyExists(err) {
            return nil
        }
        return err
    }

    return nil
}

func (c *Controller) updateFederatedGlobalRole(federatedGlobalRole *iamv1alpha2.FederatedRole) error {

    data, err := json.Marshal(federatedGlobalRole)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)

    err = cli.RESTClient().Put().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group,
            iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name,
            federatedGlobalRole.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsNotFound(err) {
            return nil
        }
        return err
    }

    return nil
}

func (c *Controller) ensureNotControlledByKubefed(globalRole *iamv1alpha2.GlobalRole) error {
    if globalRole.Labels[constants.KubefedManagedLabel] != "false" {
        if globalRole.Labels == nil {
            globalRole.Labels = make(map[string]string, 0)
        }
        globalRole = globalRole.DeepCopy()
        globalRole.Labels[constants.KubefedManagedLabel] = "false"
        _, err := c.ksClient.IamV1alpha2().GlobalRoles().Update(globalRole)
        if err != nil {
            klog.Error(err)
        }
    }
    return nil
}
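Note: the fedGlobalRoleCache / fedGlobalRoleCacheController pair consumed by
NewController is built elsewhere and not shown in this commit. A minimal
sketch of that wiring, assuming a dynamic client and the pre-context-aware
client-go APIs used throughout this diff (the GroupVersionResource is an
assumption):

    func newFedGlobalRoleCache(client dynamic.Interface) (cache.Store, cache.Controller) {
        gvr := schema.GroupVersionResource{Group: "types.kubefed.io", Version: "v1beta1", Resource: "federatedglobalroles"}
        lw := &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                return client.Resource(gvr).List(options)
            },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                return client.Resource(gvr).Watch(options)
            },
        }
        // Store unstructured objects; multiClusterSync converts them back with
        // runtime.DefaultUnstructuredConverter.
        return cache.NewInformer(lw, &unstructured.Unstructured{}, 10*time.Minute, cache.ResourceEventHandlerFuncs{})
    }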
@@ -23,6 +23,8 @@ import (
    rbacv1 "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
@@ -33,8 +35,11 @@ import (
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
    iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
    "kubesphere.io/kubesphere/pkg/constants"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "time"
)
@@ -43,18 +48,18 @@ const (
    // SuccessSynced is used as part of the Event 'reason' when a Foo is synced
    successSynced = "Synced"
    // is synced successfully
    messageResourceSynced = "GlobalRoleBinding synced successfully"
    controllerName        = "globalrolebinding-controller"
    federatedClusterRoleBindingKind = "FederatedClusterRoleBinding"
    federatedResourceVersion        = "types.kubefed.io/v1beta1"
    federatedResourceAPIPath        = "/apis/types.kubefed.io/v1beta1/federatedclusterrolebindings"
    messageResourceSynced = "GlobalRoleBinding synced successfully"
    controllerName        = "globalrolebinding-controller"
)

type Controller struct {
    k8sClient kubernetes.Interface
    informer  iamv1alpha2informers.GlobalRoleBindingInformer
    lister    iamv1alpha2listers.GlobalRoleBindingLister
    synced    cache.InformerSynced
    k8sClient                           kubernetes.Interface
    ksClient                            kubesphere.Interface
    globalRoleBindingInformer           iamv1alpha2informers.GlobalRoleBindingInformer
    globalRoleBindingLister             iamv1alpha2listers.GlobalRoleBindingLister
    globalRoleBindingSynced             cache.InformerSynced
    fedGlobalRoleBindingCache           cache.Store
    fedGlobalRoleBindingCacheController cache.Controller
    // workqueue is a rate limited work queue. This is used to queue work to be
    // processed instead of performing it as soon as a change happens. This
    // means we can ensure we only process a fixed amount of resources at a
@@ -67,7 +72,8 @@ type Controller struct {
    multiClusterEnabled bool
}

func NewController(k8sClient kubernetes.Interface, globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer, multiClusterEnabled bool) *Controller {
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer,
    fedGlobalRoleBindingCache cache.Store, fedGlobalRoleBindingCacheController cache.Controller, multiClusterEnabled bool) *Controller {
    // Create event broadcaster
    // Add sample-controller types to the default Kubernetes Scheme so Events can be
    // logged for sample-controller types.
@@ -78,13 +84,16 @@ func NewController(k8sClient kubernetes.Interface, globalRoleBindingInformer iam
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
    ctl := &Controller{
        k8sClient:           k8sClient,
        informer:            globalRoleBindingInformer,
        lister:              globalRoleBindingInformer.Lister(),
        synced:              globalRoleBindingInformer.Informer().HasSynced,
        workqueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleBinding"),
        recorder:            recorder,
        multiClusterEnabled: multiClusterEnabled,
        k8sClient:                           k8sClient,
        ksClient:                            ksClient,
        globalRoleBindingInformer:           globalRoleBindingInformer,
        globalRoleBindingLister:             globalRoleBindingInformer.Lister(),
        globalRoleBindingSynced:             globalRoleBindingInformer.Informer().HasSynced,
        fedGlobalRoleBindingCache:           fedGlobalRoleBindingCache,
        fedGlobalRoleBindingCacheController: fedGlobalRoleBindingCacheController,
        workqueue:                           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRoleBinding"),
        recorder:                            recorder,
        multiClusterEnabled:                 multiClusterEnabled,
    }
    klog.Info("Setting up event handlers")
    globalRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -101,14 +110,19 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    //init client

    // Start the informer factories to begin populating the informer caches
    klog.Info("Starting User controller")
    klog.Info("Starting GlobalRoleBinding controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")
    if ok := cache.WaitForCacheSync(stopCh, c.synced); !ok {

    synced := make([]cache.InformerSynced, 0)
    synced = append(synced, c.globalRoleBindingSynced)
    if c.multiClusterEnabled {
        synced = append(synced, c.fedGlobalRoleBindingCacheController.HasSynced)
    }

    if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
        return fmt.Errorf("failed to wait for caches to sync")
    }
@@ -197,12 +211,12 @@ func (c *Controller) processNextWorkItem() bool {
// with the current status of the resource.
func (c *Controller) reconcile(key string) error {

    globalRoleBinding, err := c.lister.Get(key)
    globalRoleBinding, err := c.globalRoleBindingLister.Get(key)
    if err != nil {
        // The user may no longer exist, in which case we stop
        // processing.
        if errors.IsNotFound(err) {
            utilruntime.HandleError(fmt.Errorf("clusterrolebinding '%s' in work queue no longer exists", key))
            utilruntime.HandleError(fmt.Errorf("globalrolebinding '%s' in work queue no longer exists", key))
            return nil
        }
        klog.Error(err)
@@ -216,6 +230,13 @@ func (c *Controller) reconcile(key string) error {
        }
    }

    if c.multiClusterEnabled {
        if err = c.multiClusterSync(globalRoleBinding); err != nil {
            klog.Error(err)
            return err
        }
    }

    c.recorder.Event(globalRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
    return nil
}
@@ -224,89 +245,181 @@ func (c *Controller) Start(stopCh <-chan struct{}) error {
    return c.Run(4, stopCh)
}

func (c *Controller) relateToClusterAdmin(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
func (c *Controller) multiClusterSync(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {

    if c.multiClusterEnabled {

        federatedClusterRoleBinding := &iamv1alpha2.FederatedClusterRoleBinding{
            TypeMeta: metav1.TypeMeta{
                Kind:       federatedClusterRoleBindingKind,
                APIVersion: federatedResourceVersion,
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: fmt.Sprintf("fed-%s", globalRoleBinding.Name),
            },
            Spec: iamv1alpha2.FederatedClusterRoleBindingSpec{
                Template: iamv1alpha2.Template{
                    Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects),
                    RoleRef: rbacv1.RoleRef{
                        APIGroup: "rbac.authorization.k8s.io",
                        Kind:     iamv1alpha2.ResourceKindClusterRole,
                        Name:     iamv1alpha2.ClusterAdmin,
                    },
                },
                Placement: iamv1alpha2.Placement{
                    ClusterSelector: iamv1alpha2.ClusterSelector{},
                },
            },
        }

        err := controllerutil.SetControllerReference(globalRoleBinding, federatedClusterRoleBinding, scheme.Scheme)

        if err != nil {
            return err
        }

        data, err := json.Marshal(federatedClusterRoleBinding)

        if err != nil {
            return err
        }

        cli := c.k8sClient.(*kubernetes.Clientset)

        err = cli.RESTClient().Post().
            AbsPath(federatedResourceAPIPath).
            Body(data).
            Do().Error()

        if err != nil {
            if errors.IsAlreadyExists(err) {
                return nil
            }
            return err
        }
    } else {

        clusterRoleBinding := &rbacv1.ClusterRoleBinding{
            TypeMeta: metav1.TypeMeta{},
            ObjectMeta: metav1.ObjectMeta{
                Name: fmt.Sprintf("fed-%s", globalRoleBinding.Name),
            },
            Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects),
            RoleRef: rbacv1.RoleRef{
                APIGroup: "rbac.authorization.k8s.io",
                Kind:     iamv1alpha2.ResourceKindClusterRole,
                Name:     iamv1alpha2.ClusterAdmin,
            },
        }

        err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, scheme.Scheme)

        if err != nil {
            return err
        }

        _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)

        if err != nil {
            if errors.IsAlreadyExists(err) {
                return nil
            }
            return err
        }
    if err := c.ensureNotControlledByKubefed(globalRoleBinding); err != nil {
        klog.Error(err)
        return err
    }

    obj, exist, err := c.fedGlobalRoleBindingCache.GetByKey(globalRoleBinding.Name)
    if !exist {
        return c.createFederatedGlobalRoleBinding(globalRoleBinding)
    }
    if err != nil {
        klog.Error(err)
        return err
    }

    var federatedGlobalRoleBinding iamv1alpha2.FederatedRoleBinding

    err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRoleBinding)

    if err != nil {
        klog.Error(err)
        return err
    }

    if !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Subjects, globalRoleBinding.Subjects) ||
        !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.RoleRef, globalRoleBinding.RoleRef) ||
        !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Labels, globalRoleBinding.Labels) ||
        !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Annotations, globalRoleBinding.Annotations) {

        federatedGlobalRoleBinding.Spec.Template.Subjects = globalRoleBinding.Subjects
        federatedGlobalRoleBinding.Spec.Template.RoleRef = globalRoleBinding.RoleRef
        federatedGlobalRoleBinding.Spec.Template.Annotations = globalRoleBinding.Annotations
        federatedGlobalRoleBinding.Spec.Template.Labels = globalRoleBinding.Labels

        return c.updateFederatedGlobalRoleBinding(&federatedGlobalRoleBinding)
    }

    return nil
}

func (c *Controller) relateToClusterAdmin(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {

    username := findExpectUsername(globalRoleBinding)

    // unexpected
    if username == "" {
        return nil
    }

    clusterRoleBinding := &rbacv1.ClusterRoleBinding{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin),
        },
        Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects),
        RoleRef: rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     iamv1alpha2.ResourceKindClusterRole,
            Name:     iamv1alpha2.ClusterAdmin,
        },
    }

    err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, scheme.Scheme)
    if err != nil {
        return err
    }

    _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)
    if err != nil {
        if errors.IsAlreadyExists(err) {
            return nil
        }
        return err
    }

    return nil
}

func findExpectUsername(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) string {
    for _, subject := range globalRoleBinding.Subjects {
        if subject.Kind == iamv1alpha2.ResourceKindUser {
            return subject.Name
        }
    }
    return ""
}

func (c *Controller) createFederatedGlobalRoleBinding(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
    federatedGlobalRoleBinding := &iamv1alpha2.FederatedRoleBinding{
        TypeMeta: metav1.TypeMeta{
            Kind:       iamv1alpha2.FedGlobalRoleBindingKind,
            APIVersion: iamv1alpha2.FedGlobalRoleBindingResource.Group + "/" + iamv1alpha2.FedGlobalRoleBindingResource.Version,
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: globalRoleBinding.Name,
        },
        Spec: iamv1alpha2.FederatedRoleBindingSpec{
            Template: iamv1alpha2.RoleBindingTemplate{
                ObjectMeta: metav1.ObjectMeta{
                    Labels:      globalRoleBinding.Labels,
                    Annotations: globalRoleBinding.Annotations,
                },
                Subjects: globalRoleBinding.Subjects,
                RoleRef:  globalRoleBinding.RoleRef,
            },
            Placement: iamv1alpha2.Placement{
                ClusterSelector: iamv1alpha2.ClusterSelector{},
            },
        },
    }

    err := controllerutil.SetControllerReference(globalRoleBinding, federatedGlobalRoleBinding, scheme.Scheme)
    if err != nil {
        return err
    }

    data, err := json.Marshal(federatedGlobalRoleBinding)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)
    err = cli.RESTClient().Post().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group,
            iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsAlreadyExists(err) {
            return nil
        }
        return err
    }

    return nil
}

func (c *Controller) updateFederatedGlobalRoleBinding(federatedGlobalRoleBinding *iamv1alpha2.FederatedRoleBinding) error {

    data, err := json.Marshal(federatedGlobalRoleBinding)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)

    err = cli.RESTClient().Put().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group,
            iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name,
            federatedGlobalRoleBinding.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsNotFound(err) {
            return nil
        }
        return err
    }

    return nil
}

func (c *Controller) ensureNotControlledByKubefed(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {
    if globalRoleBinding.Labels[constants.KubefedManagedLabel] != "false" {
        if globalRoleBinding.Labels == nil {
            globalRoleBinding.Labels = make(map[string]string, 0)
        }
        globalRoleBinding = globalRoleBinding.DeepCopy()
        globalRoleBinding.Labels[constants.KubefedManagedLabel] = "false"
        _, err := c.ksClient.IamV1alpha2().GlobalRoleBindings().Update(globalRoleBinding)
        if err != nil {
            klog.Error(err)
        }
    }
    return nil
}
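Note: createFederatedGlobalRoleBinding and updateFederatedGlobalRoleBinding
(like their GlobalRole and User counterparts) reach the kubefed API through a
bare type assertion, cli := c.k8sClient.(*kubernetes.Clientset), which panics
when k8sClient is a fake clientset in tests. A defensive variant, as a sketch
rather than what the commit does:

    cli, ok := c.k8sClient.(*kubernetes.Clientset)
    if !ok {
        return fmt.Errorf("expected *kubernetes.Clientset, got %T", c.k8sClient)
    }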
@@ -17,17 +17,23 @@ limitations under the License.
package namespace

import (
    "bytes"
    "context"
    "fmt"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/yaml"
    "k8s.io/klog"
    "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -37,11 +43,6 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/source"
)

/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.*
*/

// Add creates a new Namespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
@@ -138,7 +139,15 @@ func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Res
        return reconcile.Result{}, nil
    }

    if err = r.checkAndBindWorkspace(instance); err != nil {
    if err = r.bindWorkspace(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.initRoles(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.initCreatorRoleBinding(instance); err != nil {
        return reconcile.Result{}, err
    }
@@ -157,7 +166,7 @@ func (r *ReconcileNamespace) isControlledByWorkspace(namespace *corev1.Namespace
    return true, nil
}

func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace) error {
func (r *ReconcileNamespace) bindWorkspace(namespace *corev1.Namespace) error {

    workspaceName := namespace.Labels[constants.WorkspaceLabelKey]
@@ -165,7 +174,7 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace)
        return nil
    }

    workspace := &v1alpha1.Workspace{}
    workspace := &tenantv1alpha1.Workspace{}

    err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace)
@@ -174,18 +183,20 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace)
        if errors.IsNotFound(err) {
            return nil
        }
        klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err)
        klog.Error(err)
        return err
    }

    if !metav1.IsControlledBy(namespace, workspace) {
    // federated namespace not controlled by workspace
    if namespace.Labels[constants.KubefedManagedLabel] != "true" && !metav1.IsControlledBy(namespace, workspace) {
        namespace.OwnerReferences = removeWorkspaceOwnerReferences(namespace.OwnerReferences)
        if err := controllerutil.SetControllerReference(workspace, namespace, r.scheme); err != nil {
            klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err)
            klog.Error(err)
            return err
        }
        err = r.Update(context.TODO(), namespace)
        if err != nil {
            klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err)
            klog.Error(err)
            return err
        }
    }
@@ -193,6 +204,16 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace)
    return nil
}

func removeWorkspaceOwnerReferences(ownerReferences []metav1.OwnerReference) []metav1.OwnerReference {
    for i, owner := range ownerReferences {
        if owner.Kind == tenantv1alpha1.ResourceKindWorkspace {
            ownerReferences = append(ownerReferences[:i], ownerReferences[i+1:]...)
            i--
        }
    }
    return ownerReferences
}
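// Note (illustration, not part of this commit): the i-- above has no effect on
// a range loop, and appending over the slice being ranged can skip a second
// matching owner reference that directly follows a removed one. A filtering
// rewrite avoids both pitfalls:
//
//     func removeWorkspaceOwnerReferences(ownerReferences []metav1.OwnerReference) []metav1.OwnerReference {
//         kept := ownerReferences[:0]
//         for _, owner := range ownerReferences {
//             if owner.Kind != tenantv1alpha1.ResourceKindWorkspace {
//                 kept = append(kept, owner)
//             }
//         }
//         return kept
//     }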

func (r *ReconcileNamespace) deleteRouter(namespace string) error {
    routerName := constants.IngressControllerPrefix + namespace
@@ -230,5 +251,79 @@ func (r *ReconcileNamespace) deleteRouter(namespace string) error {
    }

    return nil

}

func (r *ReconcileNamespace) initRoles(namespace *corev1.Namespace) error {
    var roleBases iamv1alpha2.RoleBaseList

    err := r.List(context.Background(), &roleBases)
    if err != nil {
        klog.Error(err)
        return err
    }

    for _, roleBase := range roleBases.Items {
        var role rbacv1.Role

        if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil {
            var old rbacv1.Role
            err := r.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, &old)
            if err != nil {
                if errors.IsNotFound(err) {
                    role.Namespace = namespace.Name
                    err = r.Client.Create(context.Background(), &role)
                    if err != nil {
                        klog.Error(err)
                        return err
                    }
                    continue
                }
            }

            if !reflect.DeepEqual(role.Labels, old.Labels) ||
                !reflect.DeepEqual(role.Annotations, old.Annotations) ||
                !reflect.DeepEqual(role.Rules, old.Rules) {

                old.Labels = role.Labels
                old.Annotations = role.Annotations
                old.Rules = role.Rules

                return r.Update(context.Background(), &old)
            }
        }
    }
    return nil
}
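// Note (illustration, not part of this commit): initRoles returns as soon as
// one existing Role needs an update, so later RoleBase items wait for the next
// reconcile of the namespace. Replacing the final return with an in-loop
// update keeps iterating:
//
//     if err := r.Update(context.Background(), &old); err != nil {
//         return err
//     }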

func (r *ReconcileNamespace) initCreatorRoleBinding(namespace *corev1.Namespace) error {
    if creator := namespace.Annotations[constants.CreatorAnnotationKey]; creator != "" {
        creatorRoleBinding := &rbacv1.RoleBinding{
            ObjectMeta: metav1.ObjectMeta{
                Name:      fmt.Sprintf("%s-%s", creator, iamv1alpha2.NamespaceAdmin),
                Namespace: namespace.Name,
            },
            RoleRef: rbacv1.RoleRef{
                APIGroup: rbacv1.GroupName,
                Kind:     iamv1alpha2.ResourceKindRole,
                Name:     iamv1alpha2.NamespaceAdmin,
            },
            Subjects: []rbacv1.Subject{
                {
                    Name:     creator,
                    Kind:     iamv1alpha2.ResourceKindUser,
                    APIGroup: rbacv1.GroupName,
                },
            },
        }
        err := r.Client.Create(context.Background(), creatorRoleBinding)
        if err != nil {
            if errors.IsAlreadyExists(err) {
                return nil
            }
            klog.Error(err)
            return err
        }
    }

    return nil
}
@@ -17,12 +17,17 @@ limitations under the License.
package user

import (
    "encoding/json"
    "fmt"
    "golang.org/x/crypto/bcrypt"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -36,7 +41,10 @@ import (
    kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
    userinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
    userlister "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "strconv"
    "time"
)
@@ -50,12 +58,15 @@ const (
)

type Controller struct {
    k8sClient    kubernetes.Interface
    ksClient     kubesphere.Interface
    kubeconfig   kubeconfig.Interface
    userInformer userinformer.UserInformer
    userLister   userlister.UserLister
    userSynced   cache.InformerSynced
    k8sClient         kubernetes.Interface
    ksClient          kubesphere.Interface
    kubeconfig        kubeconfig.Interface
    userInformer      userinformer.UserInformer
    userLister        userlister.UserLister
    userSynced        cache.InformerSynced
    cmSynced          cache.InformerSynced
    fedUserCache      cache.Store
    fedUserController cache.Controller
    // workqueue is a rate limited work queue. This is used to queue work to be
    // processed instead of performing it as soon as a change happens. This
    // means we can ensure we only process a fixed amount of resources at a
@@ -64,11 +75,13 @@ type Controller struct {
    workqueue workqueue.RateLimitingInterface
    // recorder is an event recorder for recording Event resources to the
    // Kubernetes API.
    recorder record.EventRecorder
    recorder            record.EventRecorder
    multiClusterEnabled bool
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
    config *rest.Config, userInformer userinformer.UserInformer) *Controller {
    config *rest.Config, userInformer userinformer.UserInformer, fedUserCache cache.Store, fedUserController cache.Controller,
    configMapInformer corev1informers.ConfigMapInformer, multiClusterEnabled bool) *Controller {
    // Create event broadcaster
    // Add sample-controller types to the default Kubernetes Scheme so Events can be
    // logged for sample-controller types.
@@ -81,17 +94,21 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
    var kubeconfigOperator kubeconfig.Interface
    if config != nil {
        kubeconfigOperator = kubeconfig.NewOperator(k8sClient, config, "")
        kubeconfigOperator = kubeconfig.NewOperator(k8sClient, configMapInformer, config)
    }
    ctl := &Controller{
        k8sClient:    k8sClient,
        ksClient:     ksClient,
        kubeconfig:   kubeconfigOperator,
        userInformer: userInformer,
        userLister:   userInformer.Lister(),
        userSynced:   userInformer.Informer().HasSynced,
        workqueue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
        recorder:     recorder,
        k8sClient:           k8sClient,
        ksClient:            ksClient,
        kubeconfig:          kubeconfigOperator,
        userInformer:        userInformer,
        userLister:          userInformer.Lister(),
        userSynced:          userInformer.Informer().HasSynced,
        cmSynced:            configMapInformer.Informer().HasSynced,
        fedUserCache:        fedUserCache,
        fedUserController:   fedUserController,
        workqueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
        recorder:            recorder,
        multiClusterEnabled: multiClusterEnabled,
    }
    klog.Info("Setting up event handlers")
    userInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -108,14 +125,19 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    //init client

    // Start the informer factories to begin populating the informer caches
    klog.Info("Starting User controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")
    if ok := cache.WaitForCacheSync(stopCh, c.userSynced); !ok {

    synced := make([]cache.InformerSynced, 0)
    synced = append(synced, c.userSynced, c.cmSynced)
    if c.multiClusterEnabled {
        synced = append(synced, c.fedUserController.HasSynced)
    }

    if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
        return fmt.Errorf("failed to wait for caches to sync")
    }
@@ -217,16 +239,22 @@ func (c *Controller) reconcile(key string) error {
        return err
    }

    user, err = c.encryptPassword(user.DeepCopy())

    if err != nil {
    if user, err = c.ensurePasswordIsEncrypted(user); err != nil {
        klog.Error(err)
        return err
    }

    if c.kubeconfig != nil {
        err = c.kubeconfig.CreateKubeConfig(user)
        if err != nil {
        // ensure user kubeconfig configmap is created
        if err = c.kubeconfig.CreateKubeConfig(user); err != nil {
            klog.Error(err)
            return err
        }
    }

    // synchronization through kubefed-controller when multi cluster is enabled
    if c.multiClusterEnabled {
        if err = c.multiClusterSync(user); err != nil {
            klog.Error(err)
            return err
        }
@@ -240,9 +268,8 @@ func (c *Controller) Start(stopCh <-chan struct{}) error {
    return c.Run(4, stopCh)
}

func (c *Controller) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
func (c *Controller) ensurePasswordIsEncrypted(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
    encrypted, err := strconv.ParseBool(user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation])

    // password is not encrypted
    if err != nil || !encrypted {
        password, err := encrypt(user.Spec.EncryptedPassword)
@@ -250,21 +277,148 @@ func (c *Controller) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User,
            klog.Error(err)
            return nil, err
        }
        user = user.DeepCopy()
        user.Spec.EncryptedPassword = password
        if user.Annotations == nil {
            user.Annotations = make(map[string]string, 0)
        }

        user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] = "true"
        user.Status.State = iamv1alpha2.UserActive

        updated, err := c.ksClient.IamV1alpha2().Users().Update(user)

        return updated, err
        return c.ksClient.IamV1alpha2().Users().Update(user)
    }

    return user, nil
}

func (c *Controller) ensureNotControlledByKubefed(user *iamv1alpha2.User) error {
    if user.Labels[constants.KubefedManagedLabel] != "false" {
        if user.Labels == nil {
            user.Labels = make(map[string]string, 0)
        }
        user = user.DeepCopy()
        user.Labels[constants.KubefedManagedLabel] = "false"
        _, err := c.ksClient.IamV1alpha2().Users().Update(user)
        if err != nil {
            klog.Error(err)
        }
    }
    return nil
}
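// Note (illustration, not part of this commit): in ensureNotControlledByKubefed
// the nil-map initialization runs on the shared informer cache object before
// DeepCopy, so the copy arrives too late to protect the cache. Copying first is
// the safer ordering:
//
//     user = user.DeepCopy()
//     if user.Labels == nil {
//         user.Labels = make(map[string]string)
//     }
//     user.Labels[constants.KubefedManagedLabel] = "false"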

func (c *Controller) multiClusterSync(user *iamv1alpha2.User) error {

    if err := c.ensureNotControlledByKubefed(user); err != nil {
        klog.Error(err)
        return err
    }

    obj, exist, err := c.fedUserCache.GetByKey(user.Name)
    if !exist {
        return c.createFederatedUser(user)
    }
    if err != nil {
        klog.Error(err)
        return err
    }

    var federatedUser iamv1alpha2.FederatedUser
    if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedUser); err != nil {
        klog.Error(err)
        return err
    }

    if !reflect.DeepEqual(federatedUser.Spec.Template.Spec, user.Spec) ||
        !reflect.DeepEqual(federatedUser.Spec.Template.Status, user.Status) ||
        !reflect.DeepEqual(federatedUser.Labels, user.Labels) ||
        !reflect.DeepEqual(federatedUser.Annotations, user.Annotations) {

        federatedUser.Labels = user.Labels
        federatedUser.Spec.Template.Spec = user.Spec
        federatedUser.Spec.Template.Status = user.Status
        federatedUser.Spec.Template.Labels = user.Labels
        federatedUser.Spec.Template.Annotations = user.Annotations
        return c.updateFederatedUser(&federatedUser)
    }

    return nil
}

func (c *Controller) createFederatedUser(user *iamv1alpha2.User) error {
    federatedUser := &iamv1alpha2.FederatedUser{
        TypeMeta: metav1.TypeMeta{
            Kind:       iamv1alpha2.FedUserKind,
            APIVersion: iamv1alpha2.FedUserResource.Group + "/" + iamv1alpha2.FedUserResource.Version,
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: user.Name,
        },
        Spec: iamv1alpha2.FederatedUserSpec{
            Template: iamv1alpha2.UserTemplate{
                ObjectMeta: metav1.ObjectMeta{
                    Labels:      user.Labels,
                    Annotations: user.Annotations,
                },
                Spec:   user.Spec,
                Status: user.Status,
            },
            Placement: iamv1alpha2.Placement{
                ClusterSelector: iamv1alpha2.ClusterSelector{},
            },
        },
    }

    // must bind user lifecycle
    err := controllerutil.SetControllerReference(user, federatedUser, scheme.Scheme)
    if err != nil {
        return err
    }

    data, err := json.Marshal(federatedUser)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)

    err = cli.RESTClient().Post().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedUserResource.Group,
            iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsAlreadyExists(err) {
            return nil
        }
        return err
    }

    return nil
}

func (c *Controller) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) error {
    data, err := json.Marshal(fedUser)
    if err != nil {
        return err
    }

    cli := c.k8sClient.(*kubernetes.Clientset)

    err = cli.RESTClient().Put().
        AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedUserResource.Group,
            iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name, fedUser.Name)).
        Body(data).
        Do().Error()
    if err != nil {
        if errors.IsNotFound(err) {
            return nil
        }
        return err
    }

    return nil
}

func encrypt(password string) (string, error) {
    // when user is already mapped to another identity, password is empty by default
    // unable to log in directly until password reset
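Note: the body of encrypt is cut off by the hunk boundary above. Given the
golang.org/x/crypto/bcrypt import in this file, a plausible implementation (an
assumption, not shown by this diff) is:

    func encrypt(password string) (string, error) {
        bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
        return string(bytes), err
    }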
@@ -92,7 +92,7 @@ func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactor
        }
    }

    c := NewController(f.k8sclient, f.ksclient, nil, ksinformers.Iam().V1alpha2().Users())
    c := NewController(f.k8sclient, f.ksclient, nil, ksinformers.Iam().V1alpha2().Users(), nil, nil, k8sinformers.Core().V1().ConfigMaps(), false)
    c.userSynced = alwaysReady
    c.recorder = &record.FakeRecorder{}
@@ -17,12 +17,19 @@ limitations under the License.
package workspace

import (
    "bytes"
    "context"
    "fmt"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/yaml"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/handler"
@@ -117,5 +124,48 @@ func (r *ReconcileWorkspace) Reconcile(request reconcile.Request) (reconcile.Res
        return reconcile.Result{}, nil
    }

    if err = r.initRoles(instance); err != nil {
        klog.Error(err)
        return reconcile.Result{}, err
    }

    return reconcile.Result{}, nil
}

func (r *ReconcileWorkspace) initRoles(workspace *tenantv1alpha1.Workspace) error {
    var roleBases iamv1alpha2.RoleBaseList

    err := r.List(context.Background(), &roleBases)
    if err != nil {
        klog.Error(err)
        return err
    }

    for _, roleBase := range roleBases.Items {
        var role iamv1alpha2.WorkspaceRole

        if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil {
            var old iamv1alpha2.WorkspaceRole
            err := r.Client.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-%s", workspace.Name, role.Name)}, &old)
            if err != nil {
                if errors.IsNotFound(err) {
                    role.Name = fmt.Sprintf("%s-%s", workspace.Name, role.Name)
                    role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
                    return r.Client.Create(context.Background(), &role)
                }
            }

            if !reflect.DeepEqual(role.Labels, old.Labels) ||
                !reflect.DeepEqual(role.Annotations, old.Annotations) ||
                !reflect.DeepEqual(role.Rules, old.Rules) {

                old.Labels = role.Labels
                old.Annotations = role.Annotations
                old.Rules = role.Rules

                return r.Update(context.Background(), &old)
            }
        }
    }
    return nil
}
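Note: in initRoles above, role.Labels is written without a nil check; if a
RoleBase template carries no labels, the decoded WorkspaceRole has a nil map
and the assignment panics. A guarded variant, as a sketch rather than what the
commit does:

    if role.Labels == nil {
        role.Labels = map[string]string{}
    }
    role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name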
pkg/controller/workspacerole/workspacerole.go (new file, 354 lines)
@@ -0,0 +1,354 @@
|
||||
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workspacerole

import (
	"encoding/json"
	"fmt"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
	"kubesphere.io/kubesphere/pkg/constants"
	"reflect"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"time"
)

const (
	// successSynced is used as part of the Event 'reason' when a WorkspaceRole is synced.
	successSynced = "Synced"
	// messageResourceSynced is the message used for an Event fired when a
	// WorkspaceRole is synced successfully.
	messageResourceSynced = "WorkspaceRole synced successfully"
	controllerName        = "workspacerole-controller"
)

type Controller struct {
	k8sClient                       kubernetes.Interface
	ksClient                        kubesphere.Interface
	workspaceRoleInformer           iamv1alpha2informers.WorkspaceRoleInformer
	workspaceRoleLister             iamv1alpha2listers.WorkspaceRoleLister
	workspaceRoleSynced             cache.InformerSynced
	fedWorkspaceRoleCache           cache.Store
	fedWorkspaceRoleCacheController cache.Controller
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer,
	fedWorkspaceRoleCache cache.Store, fedWorkspaceRoleCacheController cache.Controller) *Controller {
	// Create an event broadcaster so that events emitted by this controller
	// can be logged and recorded against the WorkspaceRole resources.
	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
	ctl := &Controller{
		k8sClient:                       k8sClient,
		ksClient:                        ksClient,
		workspaceRoleInformer:           workspaceRoleInformer,
		workspaceRoleLister:             workspaceRoleInformer.Lister(),
		workspaceRoleSynced:             workspaceRoleInformer.Informer().HasSynced,
		fedWorkspaceRoleCache:           fedWorkspaceRoleCache,
		fedWorkspaceRoleCacheController: fedWorkspaceRoleCacheController,
		workqueue:                       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRole"),
		recorder:                        recorder,
	}
	klog.Info("Setting up event handlers")
	workspaceRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.enqueueWorkspaceRole,
		UpdateFunc: func(old, new interface{}) {
			ctl.enqueueWorkspaceRole(new)
		},
		DeleteFunc: ctl.enqueueWorkspaceRole,
	})
	return ctl
}
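
For reference, a minimal sketch of how this controller could be wired up and started (illustrative, not part of this commit; the factory variable, the method names on the generated externalversions factory, and the federated cache pair are assumptions):

	// Illustrative wiring; externalversions is the generated informer factory
	// under kubesphere.io/kubesphere/pkg/client/informers/externalversions.
	ksInformers := externalversions.NewSharedInformerFactory(ksClient, 10*time.Minute)
	ctl := NewController(k8sClient, ksClient,
		ksInformers.Iam().V1alpha2().WorkspaceRoles(),
		fedWorkspaceRoleCache, fedWorkspaceRoleCacheController)

	stopCh := make(chan struct{})
	ksInformers.Start(stopCh)
	if err := ctl.Start(stopCh); err != nil {
		klog.Fatal(err)
	}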

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	klog.Info("Starting WorkspaceRole controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	if ok := cache.WaitForCacheSync(stopCh, c.workspaceRoleSynced, c.fedWorkspaceRoleCacheController.HasSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Info("Starting workers")
	// Launch `threadiness` workers to process WorkspaceRole resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}

// enqueueWorkspaceRole extracts the object key and adds it to the workqueue.
func (c *Controller) enqueueWorkspaceRole(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the reconcile, passing it the namespace/name string of the
		// WorkspaceRole resource to be synced.
		if err := c.reconcile(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

// reconcile compares the actual state with the desired state and attempts to
// converge the two, syncing the WorkspaceRole identified by the given key.
func (c *Controller) reconcile(key string) error {

	workspaceRole, err := c.workspaceRoleLister.Get(key)
	if err != nil {
		// The WorkspaceRole may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("workspacerole '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	if err = c.multiClusterSync(workspaceRole); err != nil {
		klog.Error(err)
		return err
	}

	c.recorder.Event(workspaceRole, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
	return c.Run(4, stopCh)
}

func (c *Controller) multiClusterSync(workspaceRole *iamv1alpha2.WorkspaceRole) error {

	if err := c.ensureNotControlledByKubefed(workspaceRole); err != nil {
		klog.Error(err)
		return err
	}

	obj, exist, err := c.fedWorkspaceRoleCache.GetByKey(workspaceRole.Name)
	// Check the error before the existence flag: a cache error would
	// otherwise be misread as "not found" and trigger a spurious create.
	if err != nil {
		klog.Error(err)
		return err
	}
	if !exist {
		return c.createFederatedWorkspaceRole(workspaceRole)
	}

	var federatedWorkspaceRole iamv1alpha2.FederatedRole
	if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRole); err != nil {
		klog.Error(err)
		return err
	}

	if !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Rules, workspaceRole.Rules) ||
		!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Labels, workspaceRole.Labels) ||
		!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Annotations, workspaceRole.Annotations) {

		federatedWorkspaceRole.Spec.Template.Rules = workspaceRole.Rules
		federatedWorkspaceRole.Spec.Template.Annotations = workspaceRole.Annotations
		federatedWorkspaceRole.Spec.Template.Labels = workspaceRole.Labels

		return c.updateFederatedWorkspaceRole(&federatedWorkspaceRole)
	}

	return nil
}

func (c *Controller) createFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) error {
	federatedWorkspaceRole := &iamv1alpha2.FederatedRole{
		TypeMeta: metav1.TypeMeta{
			Kind:       iamv1alpha2.FedWorkspaceRoleKind,
			APIVersion: iamv1alpha2.FedWorkspaceRoleResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleResource.Version,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceRole.Name,
		},
		Spec: iamv1alpha2.FederatedRoleSpec{
			Template: iamv1alpha2.RoleTemplate{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      workspaceRole.Labels,
					Annotations: workspaceRole.Annotations,
				},
				Rules: workspaceRole.Rules,
			},
			Placement: iamv1alpha2.Placement{
				ClusterSelector: iamv1alpha2.ClusterSelector{},
			},
		},
	}

	// Make the WorkspaceRole the owner so the federated object is garbage
	// collected together with it.
	err := controllerutil.SetControllerReference(workspaceRole, federatedWorkspaceRole, scheme.Scheme)
	if err != nil {
		return err
	}

	data, err := json.Marshal(federatedWorkspaceRole)
	if err != nil {
		return err
	}

	// POST the raw payload through the underlying REST client. Note the type
	// assertion requires k8sClient to be a *kubernetes.Clientset.
	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Post().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group,
			iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name)).
		Body(data).
		Do().Error()

	if err != nil {
		// Treat AlreadyExists as success: a previous reconcile or another
		// worker has already created it.
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}

	return nil
}
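
The raw REST POST above also works through the dynamic client, which avoids the *kubernetes.Clientset type assertion. A sketch under the assumption that a dynamic.Interface named dynamicClient is available (pre-context client-go signatures, matching the Do() style used here; illustrative, not part of this commit):

	// Illustrative alternative using k8s.io/client-go/dynamic and
	// k8s.io/apimachinery/pkg/runtime/schema.
	gvr := schema.GroupVersionResource{
		Group:    iamv1alpha2.FedWorkspaceRoleResource.Group,
		Version:  iamv1alpha2.FedWorkspaceRoleResource.Version,
		Resource: iamv1alpha2.FedWorkspaceRoleResource.Name,
	}
	m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(federatedWorkspaceRole)
	if err != nil {
		return err
	}
	_, err = dynamicClient.Resource(gvr).Create(&unstructured.Unstructured{Object: m}, metav1.CreateOptions{})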

func (c *Controller) updateFederatedWorkspaceRole(federatedWorkspaceRole *iamv1alpha2.FederatedRole) error {

	data, err := json.Marshal(federatedWorkspaceRole)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)

	err = cli.RESTClient().Put().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group,
			iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name,
			federatedWorkspaceRole.Name)).
		Body(data).
		Do().Error()
	if err != nil {
		// If the object vanished in the meantime there is nothing to update;
		// the create path will recreate it on the next reconcile.
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	return nil
}

func (c *Controller) ensureNotControlledByKubefed(workspaceRole *iamv1alpha2.WorkspaceRole) error {
	if workspaceRole.Labels[constants.KubefedManagedLabel] != "false" {
		// Work on a copy: the object comes from the informer cache and must
		// not be mutated in place.
		workspaceRole = workspaceRole.DeepCopy()
		if workspaceRole.Labels == nil {
			workspaceRole.Labels = make(map[string]string)
		}
		workspaceRole.Labels[constants.KubefedManagedLabel] = "false"
		_, err := c.ksClient.IamV1alpha2().WorkspaceRoles().Update(workspaceRole)
		if err != nil {
			klog.Error(err)
			return err
		}
	}
	return nil
}
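
The fedWorkspaceRoleCache / fedWorkspaceRoleCacheController pair injected into NewController is a plain client-go cache over the federated type. One way such a pair could be constructed (a sketch, assuming a dynamic.Interface named dynamicClient and the schema and watch packages; not taken from this commit):

	// Illustrative construction of the cache.Store / cache.Controller pair.
	gvr := schema.GroupVersionResource{
		Group:    iamv1alpha2.FedWorkspaceRoleResource.Group,
		Version:  iamv1alpha2.FedWorkspaceRoleResource.Version,
		Resource: iamv1alpha2.FedWorkspaceRoleResource.Name,
	}
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return dynamicClient.Resource(gvr).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return dynamicClient.Resource(gvr).Watch(options)
		},
	}
	fedWorkspaceRoleCache, fedWorkspaceRoleCacheController := cache.NewInformer(
		lw, &unstructured.Unstructured{}, 10*time.Minute, cache.ResourceEventHandlerFuncs{})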
@@ -0,0 +1,432 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workspacerolebinding

import (
	"encoding/json"
	"fmt"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
	"kubesphere.io/kubesphere/pkg/constants"
	"reflect"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"time"
)

const (
	// successSynced is used as part of the Event 'reason' when a WorkspaceRoleBinding is synced.
	successSynced = "Synced"
	// messageResourceSynced is the message used for an Event fired when a
	// WorkspaceRoleBinding is synced successfully.
	messageResourceSynced = "WorkspaceRoleBinding synced successfully"
	controllerName        = "workspacerolebinding-controller"
)

type Controller struct {
	k8sClient                              kubernetes.Interface
	ksClient                               kubesphere.Interface
	workspaceRoleBindingInformer           iamv1alpha2informers.WorkspaceRoleBindingInformer
	workspaceRoleBindingLister             iamv1alpha2listers.WorkspaceRoleBindingLister
	workspaceRoleBindingSynced             cache.InformerSynced
	fedWorkspaceRoleBindingCache           cache.Store
	fedWorkspaceRoleBindingCacheController cache.Controller
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleBindingInformer iamv1alpha2informers.WorkspaceRoleBindingInformer,
	fedWorkspaceRoleBindingCache cache.Store, fedWorkspaceRoleBindingCacheController cache.Controller) *Controller {
	// Create an event broadcaster so that events emitted by this controller
	// can be logged and recorded against the WorkspaceRoleBinding resources.
	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
	ctl := &Controller{
		k8sClient:                              k8sClient,
		ksClient:                               ksClient,
		workspaceRoleBindingInformer:           workspaceRoleBindingInformer,
		workspaceRoleBindingLister:             workspaceRoleBindingInformer.Lister(),
		workspaceRoleBindingSynced:             workspaceRoleBindingInformer.Informer().HasSynced,
		fedWorkspaceRoleBindingCache:           fedWorkspaceRoleBindingCache,
		fedWorkspaceRoleBindingCacheController: fedWorkspaceRoleBindingCacheController,
		workqueue:                              workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRoleBinding"),
		recorder:                               recorder,
	}
	klog.Info("Setting up event handlers")
	workspaceRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.enqueueWorkspaceRoleBinding,
		UpdateFunc: func(old, new interface{}) {
			ctl.enqueueWorkspaceRoleBinding(new)
		},
		DeleteFunc: ctl.enqueueWorkspaceRoleBinding,
	})
	return ctl
}

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	klog.Info("Starting WorkspaceRoleBinding controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	if ok := cache.WaitForCacheSync(stopCh, c.workspaceRoleBindingSynced, c.fedWorkspaceRoleBindingCacheController.HasSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Info("Starting workers")
	// Launch `threadiness` workers to process WorkspaceRoleBinding resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}

// enqueueWorkspaceRoleBinding extracts the object key and adds it to the workqueue.
func (c *Controller) enqueueWorkspaceRoleBinding(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the reconcile, passing it the namespace/name string of the
		// WorkspaceRoleBinding resource to be synced.
		if err := c.reconcile(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

// reconcile compares the actual state with the desired state and attempts to
// converge the two, syncing the WorkspaceRoleBinding identified by the given key.
func (c *Controller) reconcile(key string) error {

	workspaceRoleBinding, err := c.workspaceRoleBindingLister.Get(key)
	if err != nil {
		// The WorkspaceRoleBinding may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("workspacerolebinding '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	if err = c.multiClusterSync(workspaceRoleBinding); err != nil {
		klog.Error(err)
		return err
	}

	c.recorder.Event(workspaceRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
	return c.Run(4, stopCh)
}

func (c *Controller) multiClusterSync(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {

	if err := c.ensureNotControlledByKubefed(workspaceRoleBinding); err != nil {
		klog.Error(err)
		return err
	}

	obj, exist, err := c.fedWorkspaceRoleBindingCache.GetByKey(workspaceRoleBinding.Name)
	// Check the error before the existence flag: a cache error would
	// otherwise be misread as "not found" and trigger a spurious create.
	if err != nil {
		klog.Error(err)
		return err
	}
	if !exist {
		return c.createFederatedWorkspaceRoleBinding(workspaceRoleBinding)
	}

	var federatedWorkspaceRoleBinding iamv1alpha2.FederatedRoleBinding

	err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRoleBinding)
	if err != nil {
		klog.Error(err)
		return err
	}

	if !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Subjects, workspaceRoleBinding.Subjects) ||
		!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.RoleRef, workspaceRoleBinding.RoleRef) ||
		!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Labels, workspaceRoleBinding.Labels) ||
		!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Annotations, workspaceRoleBinding.Annotations) {

		federatedWorkspaceRoleBinding.Spec.Template.Subjects = workspaceRoleBinding.Subjects
		federatedWorkspaceRoleBinding.Spec.Template.RoleRef = workspaceRoleBinding.RoleRef
		federatedWorkspaceRoleBinding.Spec.Template.Annotations = workspaceRoleBinding.Annotations
		federatedWorkspaceRoleBinding.Spec.Template.Labels = workspaceRoleBinding.Labels

		return c.updateFederatedWorkspaceRoleBinding(&federatedWorkspaceRoleBinding)
	}

	return nil
}

func (c *Controller) relateToClusterAdmin(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {

	username := findExpectedUsername(workspaceRoleBinding)

	// A binding without a User subject is unexpected; skip it.
	if username == "" {
		return nil
	}

	clusterRoleBinding := &rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin),
		},
		Subjects: ensureSubjectAPIVersionIsValid(workspaceRoleBinding.Subjects),
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     iamv1alpha2.ResourceKindClusterRole,
			Name:     iamv1alpha2.ClusterAdmin,
		},
	}

	err := controllerutil.SetControllerReference(workspaceRoleBinding, clusterRoleBinding, scheme.Scheme)
	if err != nil {
		return err
	}

	_, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}

	return nil
}

// findExpectedUsername returns the name of the binding's User subject;
// a binding is expected to reference exactly one user.
func findExpectedUsername(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) string {
	for _, subject := range workspaceRoleBinding.Subjects {
		if subject.Kind == iamv1alpha2.ResourceKindUser {
			return subject.Name
		}
	}
	return ""
}
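
To illustrate the naming scheme above (assuming iamv1alpha2.ClusterAdmin resolves to the "cluster-admin" role name, which is not shown in this commit): a WorkspaceRoleBinding whose only User subject is "alice" yields a ClusterRoleBinding named "alice-cluster-admin", owner-referenced to the WorkspaceRoleBinding so that it is garbage collected together with it.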

func (c *Controller) createFederatedWorkspaceRoleBinding(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
	federatedWorkspaceRoleBinding := &iamv1alpha2.FederatedRoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       iamv1alpha2.FedWorkspaceRoleBindingKind,
			APIVersion: iamv1alpha2.FedWorkspaceRoleBindingResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleBindingResource.Version,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceRoleBinding.Name,
		},
		Spec: iamv1alpha2.FederatedRoleBindingSpec{
			Template: iamv1alpha2.RoleBindingTemplate{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      workspaceRoleBinding.Labels,
					Annotations: workspaceRoleBinding.Annotations,
				},
				Subjects: workspaceRoleBinding.Subjects,
				RoleRef:  workspaceRoleBinding.RoleRef,
			},
			Placement: iamv1alpha2.Placement{
				ClusterSelector: iamv1alpha2.ClusterSelector{},
			},
		},
	}

	err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRoleBinding, scheme.Scheme)
	if err != nil {
		return err
	}

	data, err := json.Marshal(federatedWorkspaceRoleBinding)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Post().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group,
			iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name)).
		Body(data).
		Do().Error()

	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}

	return nil
}

func (c *Controller) updateFederatedWorkspaceRoleBinding(federatedWorkspaceRoleBinding *iamv1alpha2.FederatedRoleBinding) error {

	data, err := json.Marshal(federatedWorkspaceRoleBinding)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)

	err = cli.RESTClient().Put().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group,
			iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name,
			federatedWorkspaceRoleBinding.Name)).
		Body(data).
		Do().Error()

	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	return nil
}

func (c *Controller) ensureNotControlledByKubefed(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
	if workspaceRoleBinding.Labels[constants.KubefedManagedLabel] != "false" {
		// Work on a copy: the object comes from the informer cache and must
		// not be mutated in place.
		workspaceRoleBinding = workspaceRoleBinding.DeepCopy()
		if workspaceRoleBinding.Labels == nil {
			workspaceRoleBinding.Labels = make(map[string]string)
		}
		workspaceRoleBinding.Labels[constants.KubefedManagedLabel] = "false"
		_, err := c.ksClient.IamV1alpha2().WorkspaceRoleBindings().Update(workspaceRoleBinding)
		if err != nil {
			klog.Error(err)
			return err
		}
	}
	return nil
}

// ensureSubjectAPIVersionIsValid keeps only User subjects and normalizes their
// APIGroup so the resulting ClusterRoleBinding passes RBAC validation.
func ensureSubjectAPIVersionIsValid(subjects []rbacv1.Subject) []rbacv1.Subject {
	validSubjects := make([]rbacv1.Subject, 0)
	for _, subject := range subjects {
		if subject.Kind == iamv1alpha2.ResourceKindUser {
			validSubject := rbacv1.Subject{
				Kind:     iamv1alpha2.ResourceKindUser,
				APIGroup: "rbac.authorization.k8s.io",
				Name:     subject.Name,
			}
			validSubjects = append(validSubjects, validSubject)
		}
	}
	return validSubjects
}
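
A quick illustration of the filtering above (illustrative only; assumes iamv1alpha2.ResourceKindUser equals "User"):

	subjects := []rbacv1.Subject{
		{Kind: iamv1alpha2.ResourceKindUser, APIGroup: "iam.kubesphere.io", Name: "alice"},
		{Kind: "Group", Name: "operators"},
	}
	fixed := ensureSubjectAPIVersionIsValid(subjects)
	// fixed contains a single subject:
	//   {Kind: "User", APIGroup: "rbac.authorization.k8s.io", Name: "alice"}
	// the Group subject is dropped.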
494
pkg/controller/workspacetemplate/workspacetemplate_controller.go
Normal file
@@ -0,0 +1,494 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workspacetemplate

import (
	"bytes"
	"encoding/json"
	"fmt"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	tenantv1alpha1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
	tenantv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha2"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
	tenantv1alpha1listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha1"
	tenantv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha2"
	"reflect"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"time"
)

const (
	// successSynced is used as part of the Event 'reason' when a WorkspaceTemplate is synced.
	successSynced = "Synced"
	// messageResourceSynced is the message used for an Event fired when a
	// WorkspaceTemplate is synced successfully.
	messageResourceSynced = "WorkspaceTemplate synced successfully"
	controllerName        = "workspacetemplate-controller"
)

type Controller struct {
	k8sClient                   kubernetes.Interface
	ksClient                    kubesphere.Interface
	workspaceTemplateInformer   tenantv1alpha2informers.WorkspaceTemplateInformer
	workspaceTemplateLister     tenantv1alpha2listers.WorkspaceTemplateLister
	workspaceTemplateSynced     cache.InformerSynced
	workspaceRoleInformer       iamv1alpha2informers.WorkspaceRoleInformer
	workspaceRoleLister         iamv1alpha2listers.WorkspaceRoleLister
	workspaceRoleSynced         cache.InformerSynced
	roleBaseInformer            iamv1alpha2informers.RoleBaseInformer
	roleBaseLister              iamv1alpha2listers.RoleBaseLister
	roleBaseSynced              cache.InformerSynced
	workspaceInformer           tenantv1alpha1informers.WorkspaceInformer
	workspaceLister             tenantv1alpha1listers.WorkspaceLister
	workspaceSynced             cache.InformerSynced
	fedWorkspaceCache           cache.Store
	fedWorkspaceCacheController cache.Controller
	multiClusterEnabled         bool
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer,
	workspaceInformer tenantv1alpha1informers.WorkspaceInformer, roleBaseInformer iamv1alpha2informers.RoleBaseInformer, workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer,
	fedWorkspaceCache cache.Store, fedWorkspaceCacheController cache.Controller, multiClusterEnabled bool) *Controller {
	// Create an event broadcaster so that events emitted by this controller
	// can be logged and recorded against the WorkspaceTemplate resources.
	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
	ctl := &Controller{
		k8sClient:                   k8sClient,
		ksClient:                    ksClient,
		workspaceTemplateInformer:   workspaceTemplateInformer,
		workspaceTemplateLister:     workspaceTemplateInformer.Lister(),
		workspaceTemplateSynced:     workspaceTemplateInformer.Informer().HasSynced,
		workspaceInformer:           workspaceInformer,
		workspaceLister:             workspaceInformer.Lister(),
		workspaceSynced:             workspaceInformer.Informer().HasSynced,
		workspaceRoleInformer:       workspaceRoleInformer,
		workspaceRoleLister:         workspaceRoleInformer.Lister(),
		workspaceRoleSynced:         workspaceRoleInformer.Informer().HasSynced,
		roleBaseInformer:            roleBaseInformer,
		roleBaseLister:              roleBaseInformer.Lister(),
		roleBaseSynced:              roleBaseInformer.Informer().HasSynced,
		fedWorkspaceCache:           fedWorkspaceCache,
		fedWorkspaceCacheController: fedWorkspaceCacheController,
		multiClusterEnabled:         multiClusterEnabled,
		workqueue:                   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceTemplate"),
		recorder:                    recorder,
	}
	klog.Info("Setting up event handlers")
	workspaceTemplateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.enqueueWorkspaceTemplate,
		UpdateFunc: func(old, new interface{}) {
			ctl.enqueueWorkspaceTemplate(new)
		},
		DeleteFunc: ctl.enqueueWorkspaceTemplate,
	})
	return ctl
}
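
As with the other controllers, a minimal wiring sketch (illustrative, not part of this commit; the factory variable and the group/version method names on the generated factory are assumptions):

	// Illustrative wiring; arguments follow NewController's parameter order.
	ctl := NewController(k8sClient, ksClient,
		ksInformers.Tenant().V1alpha2().WorkspaceTemplates(),
		ksInformers.Tenant().V1alpha1().Workspaces(),
		ksInformers.Iam().V1alpha2().RoleBases(),
		ksInformers.Iam().V1alpha2().WorkspaceRoles(),
		fedWorkspaceCache, fedWorkspaceCacheController,
		multiClusterEnabled)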

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	klog.Info("Starting WorkspaceTemplate controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	synced := make([]cache.InformerSynced, 0)
	synced = append(synced, c.workspaceTemplateSynced, c.workspaceSynced, c.workspaceRoleSynced, c.roleBaseSynced)
	// The federated workspace cache only exists when multi-cluster mode is on.
	if c.multiClusterEnabled {
		synced = append(synced, c.fedWorkspaceCacheController.HasSynced)
	}
	if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Info("Starting workers")
	// Launch `threadiness` workers to process WorkspaceTemplate resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}

// enqueueWorkspaceTemplate extracts the object key and adds it to the workqueue.
func (c *Controller) enqueueWorkspaceTemplate(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the reconcile, passing it the namespace/name string of the
		// WorkspaceTemplate resource to be synced.
		if err := c.reconcile(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

// reconcile compares the actual state with the desired state and attempts to
// converge the two, syncing the WorkspaceTemplate identified by the given key.
func (c *Controller) reconcile(key string) error {

	workspaceTemplate, err := c.workspaceTemplateLister.Get(key)
	if err != nil {
		// The WorkspaceTemplate may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("workspace template '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	if err = c.initRoles(workspaceTemplate); err != nil {
		klog.Error(err)
		return err
	}

	// With multi-cluster enabled the template is propagated through a
	// FederatedWorkspace; otherwise a plain Workspace is kept in sync.
	if c.multiClusterEnabled {
		if err = c.multiClusterSync(workspaceTemplate); err != nil {
			klog.Error(err)
			return err
		}
	} else {
		if err = c.sync(workspaceTemplate); err != nil {
			klog.Error(err)
			return err
		}
	}

	c.recorder.Event(workspaceTemplate, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
	return c.Run(4, stopCh)
}

func (c *Controller) multiClusterSync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {

	obj, exist, err := c.fedWorkspaceCache.GetByKey(workspaceTemplate.Name)
	// Check the error before the existence flag: a cache error would
	// otherwise be misread as "not found" and trigger a spurious create.
	if err != nil {
		klog.Error(err)
		return err
	}
	if !exist {
		return c.createFederatedWorkspace(workspaceTemplate)
	}

	var fedWorkspace tenantv1alpha2.FederatedWorkspace

	if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &fedWorkspace); err != nil {
		klog.Error(err)
		return err
	}

	if !reflect.DeepEqual(fedWorkspace.Spec.Template.Spec, workspaceTemplate.Spec.WorkspaceSpec) ||
		!reflect.DeepEqual(fedWorkspace.Labels, workspaceTemplate.Labels) ||
		!reflect.DeepEqual(fedWorkspace.Annotations, workspaceTemplate.Annotations) ||
		!reflect.DeepEqual(fedWorkspace.Spec.Overrides, workspaceTemplate.Spec.Overrides) {

		fedWorkspace.Spec.Template.Spec = workspaceTemplate.Spec.WorkspaceSpec
		fedWorkspace.Annotations = workspaceTemplate.Annotations
		fedWorkspace.Labels = workspaceTemplate.Labels
		fedWorkspace.Spec.Overrides = workspaceTemplate.Spec.Overrides

		return c.updateFederatedWorkspace(&fedWorkspace)
	}

	return nil
}

func (c *Controller) createFederatedWorkspace(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	clusters := make([]tenantv1alpha2.Cluster, 0)
	for _, cluster := range workspaceTemplate.Spec.Clusters {
		clusters = append(clusters, tenantv1alpha2.Cluster{Name: cluster})
	}

	federatedWorkspace := &tenantv1alpha2.FederatedWorkspace{
		TypeMeta: metav1.TypeMeta{
			Kind:       tenantv1alpha2.FedWorkspaceKind,
			APIVersion: tenantv1alpha2.FedWorkspaceResource.Group + "/" + tenantv1alpha2.FedWorkspaceResource.Version,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceTemplate.Name,
		},
		Spec: tenantv1alpha2.FederatedWorkspaceSpec{
			Template: tenantv1alpha2.Template{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      workspaceTemplate.Labels,
					Annotations: workspaceTemplate.Annotations,
				},
				Spec: workspaceTemplate.Spec.WorkspaceSpec,
			},
			Placement: tenantv1alpha2.Placement{
				Clusters: clusters,
			},
			Overrides: workspaceTemplate.Spec.Overrides,
		},
	}

	err := controllerutil.SetControllerReference(workspaceTemplate, federatedWorkspace, scheme.Scheme)
	if err != nil {
		return err
	}

	data, err := json.Marshal(federatedWorkspace)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Post().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s", tenantv1alpha2.FedWorkspaceResource.Group,
			tenantv1alpha2.FedWorkspaceResource.Version, tenantv1alpha2.FedWorkspaceResource.Name)).
		Body(data).
		Do().Error()

	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		return err
	}

	return nil
}

func (c *Controller) updateFederatedWorkspace(fedWorkspace *tenantv1alpha2.FederatedWorkspace) error {

	data, err := json.Marshal(fedWorkspace)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)
	err = cli.RESTClient().Put().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", tenantv1alpha2.FedWorkspaceResource.Group,
			tenantv1alpha2.FedWorkspaceResource.Version, tenantv1alpha2.FedWorkspaceResource.Name,
			fedWorkspace.Name)).
		Body(data).
		Do().Error()

	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}

	return nil
}

func (c *Controller) sync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	workspace, err := c.workspaceLister.Get(workspaceTemplate.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			return c.createWorkspace(workspaceTemplate)
		}
		klog.Error(err)
		return err
	}

	if !reflect.DeepEqual(workspace.Spec, workspaceTemplate.Spec.WorkspaceSpec) ||
		!reflect.DeepEqual(workspace.Labels, workspaceTemplate.Labels) ||
		!reflect.DeepEqual(workspace.Annotations, workspaceTemplate.Annotations) {

		workspace = workspace.DeepCopy()
		workspace.Spec = workspaceTemplate.Spec.WorkspaceSpec
		workspace.Annotations = workspaceTemplate.Annotations
		workspace.Labels = workspaceTemplate.Labels

		return c.updateWorkspace(workspace)
	}

	return nil
}

func (c *Controller) createWorkspace(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	workspace := &tenantv1alpha1.Workspace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        workspaceTemplate.Name,
			Labels:      workspaceTemplate.Labels,
			Annotations: workspaceTemplate.Annotations,
		},
		Spec: workspaceTemplate.Spec.WorkspaceSpec,
	}

	err := controllerutil.SetControllerReference(workspaceTemplate, workspace, scheme.Scheme)
	if err != nil {
		return err
	}

	_, err = c.ksClient.TenantV1alpha1().Workspaces().Create(workspace)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		klog.Error(err)
		return err
	}

	return nil
}

func (c *Controller) updateWorkspace(workspace *tenantv1alpha1.Workspace) error {
	_, err := c.ksClient.TenantV1alpha1().Workspaces().Update(workspace)
	if err != nil {
		klog.Error(err)
		return err
	}
	return nil
}

func (c *Controller) initRoles(workspace *tenantv1alpha2.WorkspaceTemplate) error {
	roleBases, err := c.roleBaseLister.List(labels.Everything())
	if err != nil {
		klog.Error(err)
		return err
	}

	for _, roleBase := range roleBases {
		var role iamv1alpha2.WorkspaceRole
		if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil {
			old, err := c.workspaceRoleLister.Get(fmt.Sprintf("%s-%s", workspace.Name, role.Name))
			if err != nil {
				if errors.IsNotFound(err) {
					role.Name = fmt.Sprintf("%s-%s", workspace.Name, role.Name)
					if role.Labels == nil {
						role.Labels = make(map[string]string)
					}
					role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
					_, err = c.ksClient.IamV1alpha2().WorkspaceRoles().Create(&role)
					if err != nil {
						klog.Error(err)
						return err
					}
					continue
				}
				// Propagate lookup errors other than NotFound: falling
				// through here would dereference a nil `old`.
				klog.Error(err)
				return err
			}

			if !reflect.DeepEqual(role.Annotations, old.Annotations) ||
				!reflect.DeepEqual(role.Rules, old.Rules) {
				updated := old.DeepCopy()
				updated.Annotations = role.Annotations
				updated.Rules = role.Rules

				_, err = c.ksClient.IamV1alpha2().WorkspaceRoles().Update(updated)
				if err != nil {
					klog.Error(err)
					return err
				}
			}
		}
	}
	return nil
}