From 447a5a562f529d2f8abf390cb8c166c54e7ae07a Mon Sep 17 00:00:00 2001 From: "Roland.Ma" Date: Fri, 23 Oct 2020 03:33:15 +0000 Subject: [PATCH 1/5] feat: add group and groupbinding controller Signed-off-by: Roland.Ma --- cmd/controller-manager/app/controllers.go | 10 + pkg/controller/group/group_controller.go | 307 +++++++++++++++++ pkg/controller/group/group_controller_test.go | 262 ++++++++++++++ .../groupbinding/groupbinding_controller.go | 324 ++++++++++++++++++ pkg/controller/user/user_controller.go | 28 +- 5 files changed, 928 insertions(+), 3 deletions(-) create mode 100644 pkg/controller/group/group_controller.go create mode 100644 pkg/controller/group/group_controller_test.go create mode 100644 pkg/controller/groupbinding/groupbinding_controller.go diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index aabcb9546..973bd800f 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -31,6 +31,8 @@ import ( "kubesphere.io/kubesphere/pkg/controller/devopsproject" "kubesphere.io/kubesphere/pkg/controller/globalrole" "kubesphere.io/kubesphere/pkg/controller/globalrolebinding" + "kubesphere.io/kubesphere/pkg/controller/group" + "kubesphere.io/kubesphere/pkg/controller/groupbinding" "kubesphere.io/kubesphere/pkg/controller/job" "kubesphere.io/kubesphere/pkg/controller/network/ippool" "kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy" @@ -258,6 +260,12 @@ func addControllers( kubesphereInformer.Types().V1beta1().FederatedWorkspaces(), multiClusterEnabled) + groupBindingController := groupbinding.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Iam().V1alpha2().GroupBindings()) + + groupController := group.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Iam().V1alpha2().Groups()) + var clusterController manager.Runnable if multiClusterEnabled { clusterController = cluster.NewClusterController( @@ 
-319,6 +327,8 @@ func addControllers( "workspacerole-controller": workspaceRoleController, "workspacerolebinding-controller": workspaceRoleBindingController, "ippool-controller": ippoolController, + "groupbinding-controller": groupBindingController, + "group-controller": groupController, } if devopsClient != nil { diff --git a/pkg/controller/group/group_controller.go b/pkg/controller/group/group_controller.go new file mode 100644 index 000000000..02886be4f --- /dev/null +++ b/pkg/controller/group/group_controller.go @@ -0,0 +1,307 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package group + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iam1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + iamv1alpha1listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is synced successfully + messageResourceSynced = "Group synced successfully" + controllerName = "groupbinding-controller" + finalizer = "finalizers.kubesphere.io/groups" +) + +type Controller struct { + scheme *runtime.Scheme + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + groupInformer iamv1alpha2informers.GroupInformer + groupLister iamv1alpha1listers.GroupLister + groupSynced cache.InformerSynced + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. 
+ recorder record.EventRecorder +} + +// NewController creates Group Controller instance +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, groupInformer iamv1alpha2informers.GroupInformer) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. + + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + groupInformer: groupInformer, + groupLister: groupInformer.Lister(), + groupSynced: groupInformer.Informer().HasSynced, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Group"), + recorder: recorder, + } + klog.Info("Setting up event handlers") + groupInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueGroup, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueGroup(new) + }, + DeleteFunc: ctl.enqueueGroup, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting Group controller") + + // Wait for the caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + + synced := []cache.InformerSynced{c.groupSynced} + + if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ 
{ + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) enqueueGroup(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. 
+ c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + group, err := c.groupLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. + if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("group '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + if group.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. + if !sliceutil.HasString(group.Finalizers, finalizer) { + group.ObjectMeta.Finalizers = append(group.ObjectMeta.Finalizers, finalizer) + + if group, err = c.ksClient.IamV1alpha2().Groups().Update(group); err != nil { + return err + } + // Skip reconcile when group is updated. + return nil + } + } else { + // The object is being deleted + if sliceutil.HasString(group.ObjectMeta.Finalizers, finalizer) { + if err = c.deleteGroupBindings(group); err != nil { + klog.Error(err) + return err + } + + if err = c.deleteRoleBindings(group); err != nil { + klog.Error(err) + return err + } + + // remove our finalizer from the list and update it. 
+ group.Finalizers = sliceutil.RemoveString(group.ObjectMeta.Finalizers, func(item string) bool { + return item == finalizer + }) + + if group, err = c.ksClient.IamV1alpha2().Groups().Update(group); err != nil { + return err + } + } + // Our finalizer has finished, so the reconciler can do nothing. + return nil + } + + c.recorder.Event(group, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +func (c *Controller) deleteGroupBindings(group *iam1alpha2.Group) error { + + // Groupbindings that created by kubeshpere will be deleted directly. + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{iam1alpha2.GroupReferenceLabel: group.Name}).String(), + } + deleteOptions := metav1.NewDeleteOptions(0) + + if err := c.ksClient.IamV1alpha2().GroupBindings(). + DeleteCollection(deleteOptions, listOptions); err != nil { + klog.Error(err) + return err + } + return nil +} + +func (c *Controller) deleteRoleBindings(group *iam1alpha2.Group) error { + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{iam1alpha2.GroupReferenceLabel: group.Name}).String(), + } + deleteOptions := metav1.NewDeleteOptions(0) + + if err := c.ksClient.IamV1alpha2().WorkspaceRoleBindings(). + DeleteCollection(deleteOptions, listOptions); err != nil { + klog.Error(err) + return err + } + + if err := c.k8sClient.RbacV1().ClusterRoleBindings(). 
+ DeleteCollection(deleteOptions, listOptions); err != nil { + klog.Error(err) + return err + } + + if result, err := c.k8sClient.CoreV1().Namespaces().List(metav1.ListOptions{}); err != nil { + klog.Error(err) + return err + } else { + for _, namespace := range result.Items { + if err = c.k8sClient.RbacV1().RoleBindings(namespace.Name).DeleteCollection(deleteOptions, listOptions); err != nil { + klog.Error(err) + return err + } + } + } + + return nil +} diff --git a/pkg/controller/group/group_controller_test.go b/pkg/controller/group/group_controller_test.go new file mode 100644 index 000000000..d7edc7a5b --- /dev/null +++ b/pkg/controller/group/group_controller_test.go @@ -0,0 +1,262 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package group + +import ( + "reflect" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + kubeinformers "k8s.io/client-go/informers" + k8sfake "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake" + ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + alwaysReady = func() bool { return true } + noResyncPeriodFunc = func() time.Duration { return 0 } +) + +type fixture struct { + t *testing.T + + ksclient *fake.Clientset + k8sclient *k8sfake.Clientset + // Objects to put in the store. + groupLister []*v1alpha2.Group + // Actions expected to happen on the client. + kubeactions []core.Action + actions []core.Action + // Objects from here preloaded into NewSimpleFake. + kubeobjects []runtime.Object + objects []runtime.Object +} + +func newFixture(t *testing.T) *fixture { + f := &fixture{} + f.t = t + f.objects = []runtime.Object{} + f.kubeobjects = []runtime.Object{} + return f +} + +func newGroup(name string) *v1alpha2.Group { + return &v1alpha2.Group{ + TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha2.GroupSpec{}, + } +} + +func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) { + f.ksclient = fake.NewSimpleClientset(f.objects...) + f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
+ + ksinformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc()) + k8sinformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc()) + + for _, group := range f.groupLister { + err := ksinformers.Iam().V1alpha2().Groups().Informer().GetIndexer().Add(group) + if err != nil { + f.t.Errorf("add group:%s", err) + } + } + + c := NewController(f.k8sclient, f.ksclient, + ksinformers.Iam().V1alpha2().Groups()) + c.groupSynced = alwaysReady + c.recorder = &record.FakeRecorder{} + + return c, ksinformers, k8sinformers +} + +func (f *fixture) run(userName string) { + f.runController(userName, true, false) +} + +func (f *fixture) runExpectError(userName string) { + f.runController(userName, true, true) +} + +func (f *fixture) runController(group string, startInformers bool, expectError bool) { + c, i, k8sI := f.newController() + if startInformers { + stopCh := make(chan struct{}) + defer close(stopCh) + i.Start(stopCh) + k8sI.Start(stopCh) + } + + err := c.reconcile(group) + if !expectError && err != nil { + f.t.Errorf("error syncing group: %v", err) + } else if expectError && err == nil { + f.t.Error("expected error syncing group, got nil") + } + + actions := filterInformerActions(f.ksclient.Actions()) + for i, action := range actions { + if len(f.actions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:]) + break + } + + expectedAction := f.actions[i] + checkAction(expectedAction, action, f.t) + } + + if len(f.actions) > len(actions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):]) + } + + k8sActions := filterInformerActions(f.k8sclient.Actions()) + for i, action := range k8sActions { + if len(f.kubeactions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:]) + break + } + + expectedAction := f.kubeactions[i] + checkAction(expectedAction, action, f.t) + } + + if 
len(f.kubeactions) > len(k8sActions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):]) + } +} + +// checkAction verifies that expected and actual actions are equal and both have +// same attached resources +func checkAction(expected, actual core.Action, t *testing.T) { + if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) { + t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual) + return + } + + if reflect.TypeOf(actual) != reflect.TypeOf(expected) { + t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual) + return + } + + switch a := actual.(type) { + case core.CreateActionImpl: + e, _ := expected.(core.CreateActionImpl) + expObject := e.GetObject() + object := a.GetObject() + + if !reflect.DeepEqual(expObject, object) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object)) + } + case core.UpdateActionImpl: + e, _ := expected.(core.UpdateActionImpl) + expObject := e.GetObject() + object := a.GetObject() + expUser := expObject.(*v1alpha2.Group) + group := object.(*v1alpha2.Group) + + if !reflect.DeepEqual(expUser, group) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object)) + } + case core.PatchActionImpl: + e, _ := expected.(core.PatchActionImpl) + expPatch := e.GetPatch() + patch := a.GetPatch() + + if !reflect.DeepEqual(expPatch, patch) { + t.Errorf("Action %s %s has wrong patch\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch)) + } + default: + t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it", + actual.GetVerb(), actual.GetResource().Resource) + } +} + +// filterInformerActions filters list and watch actions for testing 
resources. +// Since list and watch don't change resource state we can filter it to lower +// nose level in our tests. +func filterInformerActions(actions []core.Action) []core.Action { + var ret []core.Action + for _, action := range actions { + if !action.Matches("update", "groups") { + continue + } + ret = append(ret, action) + } + + return ret +} + +func (f *fixture) expectUpdateGroupsFinalizerAction(group *v1alpha2.Group) { + expect := group.DeepCopy() + expect.Finalizers = []string{"finalizers.kubesphere.io/groups"} + action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect) + f.actions = append(f.actions, action) +} + +func (f *fixture) expectUpdateGroupsDeleteAction(group *v1alpha2.Group) { + expect := group.DeepCopy() + expect.Finalizers = []string{} + action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "groups"}, "", expect) + f.actions = append(f.actions, action) +} + +func getKey(group *v1alpha2.Group, t *testing.T) string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(group) + if err != nil { + t.Errorf("Unexpected error getting key for group %v: %v", group.Name, err) + return "" + } + return key +} + +func TestDoNothing(t *testing.T) { + f := newFixture(t) + group := newGroup("test") + + f.groupLister = append(f.groupLister, group) + f.objects = append(f.objects, group) + + f.expectUpdateGroupsFinalizerAction(group) + f.run(getKey(group, t)) + + f = newFixture(t) + + deletedGroup := group.DeepCopy() + deletedGroup.Finalizers = []string{"finalizers.kubesphere.io/groups"} + now := metav1.Now() + deletedGroup.ObjectMeta.DeletionTimestamp = &now + + f.groupLister = append(f.groupLister, deletedGroup) + f.objects = append(f.objects, deletedGroup) + f.expectUpdateGroupsDeleteAction(deletedGroup) + f.run(getKey(deletedGroup, t)) +} diff --git a/pkg/controller/groupbinding/groupbinding_controller.go b/pkg/controller/groupbinding/groupbinding_controller.go new file mode 100644 index 
000000000..4f7a7776c --- /dev/null +++ b/pkg/controller/groupbinding/groupbinding_controller.go @@ -0,0 +1,324 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package groupbinding + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is synced successfully + messageResourceSynced = "GroupBinding synced successfully" + controllerName = "groupbinding-controller" + finalizer = "finalizers.kubesphere.io/groupsbindings" +) + +type Controller struct { + 
scheme *runtime.Scheme + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + groupBindingInformer iamv1alpha2informers.GroupBindingInformer + groupBindingLister iamv1alpha2listers.GroupBindingLister + groupBindingSynced cache.InformerSynced + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +// NewController creates GroupBinding Controller instance +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, groupBindingInformer iamv1alpha2informers.GroupBindingInformer) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. 
+ + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + groupBindingInformer: groupBindingInformer, + groupBindingLister: groupBindingInformer.Lister(), + groupBindingSynced: groupBindingInformer.Informer().HasSynced, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GroupBinding"), + recorder: recorder, + } + klog.Info("Setting up event handlers") + groupBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueGroupBinding, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueGroupBinding(new) + }, + DeleteFunc: ctl.enqueueGroupBinding, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting GroupBinding controller") + + // Wait for the caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + + synced := []cache.InformerSynced{c.groupBindingSynced} + + if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) enqueueGroupBinding(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); 
err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. 
+ c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + groupBinding, err := c.groupBindingLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. + if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("groupbinding '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + if groupBinding.ObjectMeta.DeletionTimestamp.IsZero() { + + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. + if !sliceutil.HasString(groupBinding.Finalizers, finalizer) { + groupBinding.ObjectMeta.Finalizers = append(groupBinding.ObjectMeta.Finalizers, finalizer) + if groupBinding, err = c.ksClient.IamV1alpha2().GroupBindings().Update(groupBinding); err != nil { + return err + } + // Skip reconcile when groupbinding is updated. + return nil + } + } else { + // The object is being deleted + if sliceutil.HasString(groupBinding.ObjectMeta.Finalizers, finalizer) { + if err = c.bindUser(groupBinding); err != nil { + klog.Error(err) + return err + } + + // remove our finalizer from the list and update it. + groupBinding.Finalizers = sliceutil.RemoveString(groupBinding.ObjectMeta.Finalizers, func(item string) bool { + return item == finalizer + }) + + if groupBinding, err = c.ksClient.IamV1alpha2().GroupBindings().Update(groupBinding); err != nil { + return err + } + } + // Our finalizer has finished, so the reconciler can do nothing. 
+ return nil + } + + if err = c.bindUser(groupBinding); err != nil { + klog.Error(err) + return err + } + + c.recorder.Event(groupBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +// Udpate user's Group property. So no need to query user's groups when authorizing. +func (c *Controller) bindUser(groupBinding *iamv1alpha2.GroupBinding) error { + + users := make([]string, 0) + // Ignore the user if the user if being deleted. + for _, u := range groupBinding.Users { + if user, err := c.ksClient.IamV1alpha2().Users().Get(u, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() { + users = append(users, u) + } + } + + // Nothing to do + if len(users) == 0 { + return nil + } + + // Get all GroupBindings and check whether user exists in the Group. + listOptions := metav1.ListOptions{} + groupBindingList, err := c.ksClient.IamV1alpha2().GroupBindings().List(listOptions) + if err != nil { + klog.Error(err) + return err + } + + userGroups := make(map[string][]string) + for _, item := range groupBindingList.Items { + if item.ObjectMeta.DeletionTimestamp.IsZero() { + for _, u := range users { + if sliceutil.HasString(item.Users, u) { + if userGroups[u] == nil { + userGroups[u] = make([]string, 0) + } + userGroups[u] = append(userGroups[u], item.GroupRef.Name) + } + } + } + } + for k, v := range userGroups { + if err := c.patchUser(k, v); err != nil { + if errors.IsNotFound(err) { + klog.Infof("user %s doesn't exist any more", k) + return nil + } + klog.Error(err) + return err + } + } + return nil +} + +func (c *Controller) patchUser(userName string, groups []string) error { + if user, err := c.ksClient.IamV1alpha2().Users().Get(userName, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() { + newUser := user.DeepCopy() + newUser.Spec.Groups = groups + patch := client.MergeFrom(user) + patchData, _ 
:= patch.Data(newUser) + if _, err := c.ksClient.IamV1alpha2().Users(). + Patch(userName, patch.Type(), patchData); err != nil { + return err + } + } else { + return err + } + return nil +} diff --git a/pkg/controller/user/user_controller.go b/pkg/controller/user/user_controller.go index 8ee3ed7eb..4409d5269 100644 --- a/pkg/controller/user/user_controller.go +++ b/pkg/controller/user/user_controller.go @@ -19,6 +19,10 @@ package user import ( "encoding/json" "fmt" + "reflect" + "strconv" + "time" + "golang.org/x/crypto/bcrypt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -49,10 +53,7 @@ import ( "kubesphere.io/kubesphere/pkg/simple/client/devops" ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/utils/sliceutil" - "reflect" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "strconv" - "time" ) const ( @@ -296,6 +297,11 @@ func (c *Controller) reconcile(key string) error { return err } + if err = c.deleteGroupBindings(user); err != nil { + klog.Error(err) + return err + } + if c.devopsClient != nil { // unassign jenkins role, unassign multiple times is allowed if err := c.unassignDevOpsAdminRole(user); err != nil { @@ -552,6 +558,22 @@ func (c *Controller) ldapSync(user *iamv1alpha2.User) error { } } +func (c *Controller) deleteGroupBindings(user *iamv1alpha2.User) error { + + // Groupbindings that created by kubeshpere will be deleted directly. + listOptions := metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}).String(), + } + deleteOptions := metav1.NewDeleteOptions(0) + + if err := c.ksClient.IamV1alpha2().GroupBindings(). 
+ DeleteCollection(deleteOptions, listOptions); err != nil { + klog.Error(err) + return err + } + return nil +} + func (c *Controller) deleteRoleBindings(user *iamv1alpha2.User) error { listOptions := metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}).String(), From 80f3db3d42eaef46b2a671d205fbf31fe3d1b4c0 Mon Sep 17 00:00:00 2001 From: "Roland.Ma" Date: Fri, 23 Oct 2020 03:34:29 +0000 Subject: [PATCH 2/5] feat: authentication users with group's RoleBindings in API Server Signed-off-by: Roland.Ma --- pkg/apiserver/apiserver.go | 11 +- .../authenticators/basic/basic.go | 3 +- .../authenticators/jwttoken/jwt_token.go | 13 ++- .../authorization/authorizerfactory/rbac.go | 5 +- pkg/kapis/iam/v1alpha2/handler.go | 87 +++++++++++---- pkg/models/iam/am/am.go | 103 +++++++++++------- pkg/models/iam/im/authenticator.go | 8 +- pkg/models/tenant/devops.go | 3 +- pkg/models/tenant/tenant.go | 13 ++- 9 files changed, 162 insertions(+), 84 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 141482110..ec45754c1 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -20,6 +20,10 @@ import ( "bytes" "context" "fmt" + "net/http" + rt "runtime" + "time" + "github.com/emicklei/go-restful" "k8s.io/apimachinery/pkg/runtime/schema" urlruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -77,9 +81,6 @@ import ( "kubesphere.io/kubesphere/pkg/simple/client/s3" "kubesphere.io/kubesphere/pkg/simple/client/sonarqube" utilnet "kubesphere.io/kubesphere/pkg/utils/net" - "net/http" - rt "runtime" - "time" ) const ( @@ -290,7 +291,7 @@ func (s *APIServer) buildHandlerChain(stopCh <-chan struct{}) { // authenticators are unordered authn := unionauth.New(anonymous.NewAuthenticator(), basictoken.New(basic.NewBasicAuthenticator(im.NewPasswordAuthenticator(s.KubernetesClient.KubeSphere(), s.InformerFactory.KubeSphereSharedInformerFactory().Iam().V1alpha2().Users().Lister(), 
s.Config.AuthenticationOptions))), - bearertoken.New(jwttoken.NewTokenAuthenticator(im.NewTokenOperator(s.CacheClient, s.Config.AuthenticationOptions)))) + bearertoken.New(jwttoken.NewTokenAuthenticator(im.NewTokenOperator(s.CacheClient, s.Config.AuthenticationOptions), s.InformerFactory.KubeSphereSharedInformerFactory().Iam().V1alpha2().Users().Lister()))) handler = filters.WithAuthentication(handler, authn, loginRecorder) handler = filters.WithRequestInfo(handler, requestInfoResolver) s.Server.Handler = handler @@ -378,6 +379,8 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error { {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "workspaceroles"}, {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "workspacerolebindings"}, {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "loginrecords"}, + {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groups"}, + {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, {Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "clusters"}, {Group: "devops.kubesphere.io", Version: "v1alpha3", Resource: "devopsprojects"}, } diff --git a/pkg/apiserver/authentication/authenticators/basic/basic.go b/pkg/apiserver/authentication/authenticators/basic/basic.go index 2ad09dc43..7f40efc2d 100644 --- a/pkg/apiserver/authentication/authenticators/basic/basic.go +++ b/pkg/apiserver/authentication/authenticators/basic/basic.go @@ -18,6 +18,7 @@ package basic import ( "context" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "kubesphere.io/kubesphere/pkg/models/iam/im" @@ -50,7 +51,7 @@ func (t *basicAuthenticator) AuthenticatePassword(ctx context.Context, username, User: &user.DefaultInfo{ Name: providedUser.GetName(), UID: providedUser.GetUID(), - Groups: []string{user.AllAuthenticated}, + Groups: append(providedUser.GetGroups(), user.AllAuthenticated), }, }, true, nil } diff --git 
a/pkg/apiserver/authentication/authenticators/jwttoken/jwt_token.go b/pkg/apiserver/authentication/authenticators/jwttoken/jwt_token.go index fdc827bdf..a75734fd7 100644 --- a/pkg/apiserver/authentication/authenticators/jwttoken/jwt_token.go +++ b/pkg/apiserver/authentication/authenticators/jwttoken/jwt_token.go @@ -18,9 +18,11 @@ package jwttoken import ( "context" + "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/klog" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" "kubesphere.io/kubesphere/pkg/models/iam/im" ) @@ -31,11 +33,13 @@ import ( // because some resources are public accessible. type tokenAuthenticator struct { tokenOperator im.TokenManagementInterface + userLister iamv1alpha2listers.UserLister } -func NewTokenAuthenticator(tokenOperator im.TokenManagementInterface) authenticator.Token { +func NewTokenAuthenticator(tokenOperator im.TokenManagementInterface, userLister iamv1alpha2listers.UserLister) authenticator.Token { return &tokenAuthenticator{ tokenOperator: tokenOperator, + userLister: userLister, } } @@ -46,11 +50,16 @@ func (t *tokenAuthenticator) AuthenticateToken(ctx context.Context, token string return nil, false, err } + dbUser, err := t.userLister.Get(providedUser.GetName()) + if err != nil { + return nil, false, err + } + return &authenticator.Response{ User: &user.DefaultInfo{ Name: providedUser.GetName(), UID: providedUser.GetUID(), - Groups: []string{user.AllAuthenticated}, + Groups: append(dbUser.Spec.Groups, user.AllAuthenticated), }, }, true, nil } diff --git a/pkg/apiserver/authorization/authorizerfactory/rbac.go b/pkg/apiserver/authorization/authorizerfactory/rbac.go index 9d7bbabff..b845136bb 100644 --- a/pkg/apiserver/authorization/authorizerfactory/rbac.go +++ b/pkg/apiserver/authorization/authorizerfactory/rbac.go @@ -22,6 +22,7 @@ import ( "bytes" "context" "fmt" + "github.com/open-policy-agent/opa/rego" 
"k8s.io/apiserver/pkg/authentication/serviceaccount" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" @@ -259,7 +260,7 @@ func (r *RBACAuthorizer) visitRulesFor(requestAttributes authorizer.Attributes, workspace = requestAttributes.GetWorkspace() } - if workspaceRoleBindings, err := r.am.ListWorkspaceRoleBindings("", workspace); err != nil { + if workspaceRoleBindings, err := r.am.ListWorkspaceRoleBindings("", nil, workspace); err != nil { if !visitor(nil, "", nil, err) { return } @@ -304,7 +305,7 @@ func (r *RBACAuthorizer) visitRulesFor(requestAttributes authorizer.Attributes, } } - if roleBindings, err := r.am.ListRoleBindings("", namespace); err != nil { + if roleBindings, err := r.am.ListRoleBindings("", nil, namespace); err != nil { if !visitor(nil, "", nil, err) { return } diff --git a/pkg/kapis/iam/v1alpha2/handler.go b/pkg/kapis/iam/v1alpha2/handler.go index 8f18a69ba..d4cde88d1 100644 --- a/pkg/kapis/iam/v1alpha2/handler.go +++ b/pkg/kapis/iam/v1alpha2/handler.go @@ -18,6 +18,8 @@ package v1alpha2 import ( "fmt" + "strings" + "github.com/emicklei/go-restful" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -34,7 +36,6 @@ import ( "kubesphere.io/kubesphere/pkg/models/iam/am" "kubesphere.io/kubesphere/pkg/models/iam/im" servererr "kubesphere.io/kubesphere/pkg/server/errors" - "strings" ) type iamHandler struct { @@ -141,7 +142,14 @@ func (h *iamHandler) RetrieveMemberRoleTemplates(request *restful.Request, respo if strings.HasSuffix(request.Request.URL.Path, iamv1alpha2.ResourcesPluralWorkspaceRole) { workspace := request.PathParameter("workspace") username := request.PathParameter("workspacemember") - workspaceRole, err := h.am.GetWorkspaceRoleOfUser(username, workspace) + + user, err := h.im.DescribeUser(username) + if err != nil { + api.HandleInternalError(response, request, err) + return + } + + workspaceRoles, err := h.am.GetWorkspaceRoleOfUser(username, user.Spec.Groups, workspace) if err != nil { // if role binding not 
exist return empty list if errors.IsNotFound(err) { @@ -151,19 +159,33 @@ func (h *iamHandler) RetrieveMemberRoleTemplates(request *restful.Request, respo api.HandleInternalError(response, request, err) return } + templateRoles := make(map[string]*rbacv1.Role) + for _, role := range workspaceRoles { + // merge template Role + result, err := h.am.ListWorkspaceRoles(&query.Query{ + Pagination: query.NoPagination, + SortBy: "", + Ascending: false, + Filters: map[query.Field]query.Value{iamv1alpha2.AggregateTo: query.Value(role.Name)}, + }) - result, err := h.am.ListWorkspaceRoles(&query.Query{ - Pagination: query.NoPagination, - SortBy: "", - Ascending: false, - Filters: map[query.Field]query.Value{iamv1alpha2.AggregateTo: query.Value(workspaceRole.Name)}, - }) - if err != nil { - api.HandleInternalError(response, request, err) - return + if err != nil { + api.HandleInternalError(response, request, err) + return + } + + for _, obj := range result.Items { + templateRole := obj.(*rbacv1.Role) + templateRoles[templateRole.Name] = templateRole + } } - response.WriteEntity(result.Items) + results := make([]*rbacv1.Role, 0, len(templateRoles)) + for _, value := range templateRoles { + results = append(results, value) + } + + response.WriteEntity(results) return } @@ -175,8 +197,13 @@ func (h *iamHandler) RetrieveMemberRoleTemplates(request *restful.Request, respo return } - role, err := h.am.GetNamespaceRoleOfUser(username, namespace) + user, err := h.im.DescribeUser(username) + if err != nil { + api.HandleInternalError(response, request, err) + return + } + roles, err := h.am.GetNamespaceRoleOfUser(username, user.Spec.Groups, namespace) if err != nil { // if role binding not exist return empty list if errors.IsNotFound(err) { @@ -187,19 +214,33 @@ func (h *iamHandler) RetrieveMemberRoleTemplates(request *restful.Request, respo return } - result, err := h.am.ListRoles(namespace, &query.Query{ - Pagination: query.NoPagination, - SortBy: "", - Ascending: false, - Filters: 
map[query.Field]query.Value{iamv1alpha2.AggregateTo: query.Value(role.Name)}, - }) + templateRoles := make(map[string]*rbacv1.Role) + for _, role := range roles { + // merge template Role + result, err := h.am.ListRoles(namespace, &query.Query{ + Pagination: query.NoPagination, + SortBy: "", + Ascending: false, + Filters: map[query.Field]query.Value{iamv1alpha2.AggregateTo: query.Value(role.Name)}, + }) - if err != nil { - api.HandleInternalError(response, request, err) - return + if err != nil { + api.HandleInternalError(response, request, err) + return + } + + for _, obj := range result.Items { + templateRole := obj.(*rbacv1.Role) + templateRoles[templateRole.Name] = templateRole + } } - response.WriteEntity(result.Items) + results := make([]*rbacv1.Role, 0, len(templateRoles)) + for _, value := range templateRoles { + results = append(results, value) + } + + response.WriteEntity(results) return } } diff --git a/pkg/models/iam/am/am.go b/pkg/models/iam/am/am.go index 2a78981d2..4ad733ee2 100644 --- a/pkg/models/iam/am/am.go +++ b/pkg/models/iam/am/am.go @@ -18,6 +18,7 @@ package am import ( "encoding/json" "fmt" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -33,21 +34,22 @@ import ( kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" "kubesphere.io/kubesphere/pkg/informers" resourcev1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" ) type AccessManagementInterface interface { GetGlobalRoleOfUser(username string) (*iamv1alpha2.GlobalRole, error) - GetWorkspaceRoleOfUser(username, workspace string) (*iamv1alpha2.WorkspaceRole, error) + GetWorkspaceRoleOfUser(username string, groups []string, workspace string) ([]*iamv1alpha2.WorkspaceRole, error) GetClusterRoleOfUser(username string) (*rbacv1.ClusterRole, error) - GetNamespaceRoleOfUser(username, namespace string) (*rbacv1.Role, error) + GetNamespaceRoleOfUser(username 
string, groups []string, namespace string) ([]*rbacv1.Role, error) ListRoles(namespace string, query *query.Query) (*api.ListResult, error) ListClusterRoles(query *query.Query) (*api.ListResult, error) ListWorkspaceRoles(query *query.Query) (*api.ListResult, error) ListGlobalRoles(query *query.Query) (*api.ListResult, error) ListGlobalRoleBindings(username string) ([]*iamv1alpha2.GlobalRoleBinding, error) ListClusterRoleBindings(username string) ([]*rbacv1.ClusterRoleBinding, error) - ListWorkspaceRoleBindings(username, workspace string) ([]*iamv1alpha2.WorkspaceRoleBinding, error) - ListRoleBindings(username, namespace string) ([]*rbacv1.RoleBinding, error) + ListWorkspaceRoleBindings(username string, groups []string, workspace string) ([]*iamv1alpha2.WorkspaceRoleBinding, error) + ListRoleBindings(username string, groups []string, namespace string) ([]*rbacv1.RoleBinding, error) GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) (string, []rbacv1.PolicyRule, error) GetGlobalRole(globalRole string) (*iamv1alpha2.GlobalRole, error) GetWorkspaceRole(workspace string, name string) (*iamv1alpha2.WorkspaceRole, error) @@ -124,9 +126,9 @@ func (am *amOperator) GetGlobalRoleOfUser(username string) (*iamv1alpha2.GlobalR return nil, err } -func (am *amOperator) GetWorkspaceRoleOfUser(username, workspace string) (*iamv1alpha2.WorkspaceRole, error) { +func (am *amOperator) GetWorkspaceRoleOfUser(username string, groups []string, workspace string) ([]*iamv1alpha2.WorkspaceRole, error) { - userRoleBindings, err := am.ListWorkspaceRoleBindings(username, workspace) + userRoleBindings, err := am.ListWorkspaceRoleBindings(username, groups, workspace) if err != nil { klog.Error(err) @@ -134,23 +136,29 @@ func (am *amOperator) GetWorkspaceRoleOfUser(username, workspace string) (*iamv1 } if len(userRoleBindings) > 0 { - role, err := am.GetWorkspaceRole(workspace, userRoleBindings[0].RoleRef.Name) + roles := make([]*iamv1alpha2.WorkspaceRole, len(userRoleBindings)) + for 
i, roleBinding := range userRoleBindings { + role, err := am.GetWorkspaceRole(workspace, roleBinding.RoleRef.Name) - if err != nil { - klog.Error(err) - return nil, err + if err != nil { + klog.Error(err) + return nil, err + } + + out := role.DeepCopy() + if out.Annotations == nil { + out.Annotations = make(map[string]string, 0) + } + out.Annotations[iamv1alpha2.WorkspaceRoleAnnotation] = role.Name + + roles[i] = out } if len(userRoleBindings) > 1 { - klog.Warningf("conflict workspace role binding, username: %s", username) + klog.Infof("conflict workspace role binding, username: %s", username) } - out := role.DeepCopy() - if out.Annotations == nil { - out.Annotations = make(map[string]string, 0) - } - out.Annotations[iamv1alpha2.WorkspaceRoleAnnotation] = role.Name - return out, nil + return roles, nil } err = errors.NewNotFound(iamv1alpha2.Resource(iamv1alpha2.ResourcesSingularWorkspaceRoleBinding), username) @@ -158,8 +166,9 @@ func (am *amOperator) GetWorkspaceRoleOfUser(username, workspace string) (*iamv1 return nil, err } -func (am *amOperator) GetNamespaceRoleOfUser(username, namespace string) (*rbacv1.Role, error) { - userRoleBindings, err := am.ListRoleBindings(username, namespace) +func (am *amOperator) GetNamespaceRoleOfUser(username string, groups []string, namespace string) ([]*rbacv1.Role, error) { + + userRoleBindings, err := am.ListRoleBindings(username, groups, namespace) if err != nil { klog.Error(err) @@ -167,21 +176,27 @@ func (am *amOperator) GetNamespaceRoleOfUser(username, namespace string) (*rbacv } if len(userRoleBindings) > 0 { - role, err := am.GetNamespaceRole(namespace, userRoleBindings[0].RoleRef.Name) - if err != nil { - klog.Error(err) - return nil, err - } - if len(userRoleBindings) > 1 { - klog.Warningf("conflict role binding, username: %s", username) + roles := make([]*rbacv1.Role, len(userRoleBindings)) + for i, roleBinding := range userRoleBindings { + role, err := am.GetNamespaceRole(namespace, roleBinding.RoleRef.Name) + if err 
!= nil { + klog.Error(err) + return nil, err + } + + out := role.DeepCopy() + if out.Annotations == nil { + out.Annotations = make(map[string]string, 0) + } + out.Annotations[iamv1alpha2.RoleAnnotation] = role.Name + + roles[i] = out } - out := role.DeepCopy() - if out.Annotations == nil { - out.Annotations = make(map[string]string, 0) + if len(userRoleBindings) > 1 { + klog.Infof("conflict role binding, username: %s", username) } - out.Annotations[iamv1alpha2.RoleAnnotation] = role.Name - return out, nil + return roles, nil } err = errors.NewNotFound(iamv1alpha2.Resource(iamv1alpha2.ResourcesSingularRoleBinding), username) @@ -221,7 +236,7 @@ func (am *amOperator) GetClusterRoleOfUser(username string) (*rbacv1.ClusterRole return nil, err } -func (am *amOperator) ListWorkspaceRoleBindings(username, workspace string) ([]*iamv1alpha2.WorkspaceRoleBinding, error) { +func (am *amOperator) ListWorkspaceRoleBindings(username string, groups []string, workspace string) ([]*iamv1alpha2.WorkspaceRoleBinding, error) { roleBindings, err := am.resourceGetter.List(iamv1alpha2.ResourcesPluralWorkspaceRoleBinding, "", query.New()) if err != nil { @@ -233,7 +248,7 @@ func (am *amOperator) ListWorkspaceRoleBindings(username, workspace string) ([]* for _, obj := range roleBindings.Items { roleBinding := obj.(*iamv1alpha2.WorkspaceRoleBinding) inSpecifiedWorkspace := workspace == "" || roleBinding.Labels[tenantv1alpha1.WorkspaceLabel] == workspace - if contains(roleBinding.Subjects, username) && inSpecifiedWorkspace { + if contains(roleBinding.Subjects, username, groups) && inSpecifiedWorkspace { result = append(result, roleBinding) } } @@ -252,7 +267,7 @@ func (am *amOperator) ListClusterRoleBindings(username string) ([]*rbacv1.Cluste result := make([]*rbacv1.ClusterRoleBinding, 0) for _, obj := range roleBindings.Items { roleBinding := obj.(*rbacv1.ClusterRoleBinding) - if contains(roleBinding.Subjects, username) { + if contains(roleBinding.Subjects, username, nil) { result = 
append(result, roleBinding) } } @@ -271,7 +286,7 @@ func (am *amOperator) ListGlobalRoleBindings(username string) ([]*iamv1alpha2.Gl for _, obj := range roleBindings.Items { roleBinding := obj.(*iamv1alpha2.GlobalRoleBinding) - if contains(roleBinding.Subjects, username) { + if contains(roleBinding.Subjects, username, nil) { result = append(result, roleBinding) } } @@ -279,7 +294,7 @@ func (am *amOperator) ListGlobalRoleBindings(username string) ([]*iamv1alpha2.Gl return result, nil } -func (am *amOperator) ListRoleBindings(username, namespace string) ([]*rbacv1.RoleBinding, error) { +func (am *amOperator) ListRoleBindings(username string, groups []string, namespace string) ([]*rbacv1.RoleBinding, error) { roleBindings, err := am.resourceGetter.List(iamv1alpha2.ResourcesPluralRoleBinding, namespace, query.New()) if err != nil { klog.Error(err) @@ -289,14 +304,14 @@ func (am *amOperator) ListRoleBindings(username, namespace string) ([]*rbacv1.Ro result := make([]*rbacv1.RoleBinding, 0) for _, obj := range roleBindings.Items { roleBinding := obj.(*rbacv1.RoleBinding) - if contains(roleBinding.Subjects, username) { + if contains(roleBinding.Subjects, username, groups) { result = append(result, roleBinding) } } return result, nil } -func contains(subjects []rbacv1.Subject, username string) bool { +func contains(subjects []rbacv1.Subject, username string, groups []string) bool { // if username is nil means list all role bindings if username == "" { return true @@ -305,6 +320,9 @@ func contains(subjects []rbacv1.Subject, username string) bool { if subject.Kind == rbacv1.UserKind && subject.Name == username { return true } + if subject.Kind == rbacv1.GroupKind && sliceutil.HasString(groups, subject.Name) { + return true + } } return false } @@ -557,7 +575,7 @@ func (am *amOperator) CreateWorkspaceRoleBinding(username string, workspace stri return err } - roleBindings, err := am.ListWorkspaceRoleBindings(username, workspace) + roleBindings, err := 
am.ListWorkspaceRoleBindings(username, nil, workspace) if err != nil { klog.Error(err) return err @@ -666,7 +684,8 @@ func (am *amOperator) CreateNamespaceRoleBinding(username string, namespace stri return err } - roleBindings, err := am.ListRoleBindings(username, namespace) + // Don't pass user's groups. + roleBindings, err := am.ListRoleBindings(username, nil, namespace) if err != nil { klog.Error(err) return err @@ -714,7 +733,7 @@ func (am *amOperator) CreateNamespaceRoleBinding(username string, namespace stri func (am *amOperator) RemoveUserFromWorkspace(username string, workspace string) error { - roleBindings, err := am.ListWorkspaceRoleBindings(username, workspace) + roleBindings, err := am.ListWorkspaceRoleBindings(username, nil, workspace) if err != nil { klog.Error(err) return err @@ -736,7 +755,7 @@ func (am *amOperator) RemoveUserFromWorkspace(username string, workspace string) func (am *amOperator) RemoveUserFromNamespace(username string, namespace string) error { - roleBindings, err := am.ListRoleBindings(username, namespace) + roleBindings, err := am.ListRoleBindings(username, nil, namespace) if err != nil { klog.Error(err) return err diff --git a/pkg/models/iam/im/authenticator.go b/pkg/models/iam/im/authenticator.go index 8c4e69422..f528bb02b 100644 --- a/pkg/models/iam/im/authenticator.go +++ b/pkg/models/iam/im/authenticator.go @@ -20,6 +20,8 @@ package im import ( "fmt" + "net/mail" + "github.com/go-ldap/ldap" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -32,7 +34,6 @@ import ( kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" "kubesphere.io/kubesphere/pkg/constants" - "net/mail" ) var ( @@ -131,8 +132,9 @@ func (im *passwordAuthenticator) Authenticate(username, password string) (authus if checkPasswordHash(password, user.Spec.EncryptedPassword) { return &authuser.DefaultInfo{ - Name: user.Name, - UID: string(user.UID), + 
Name: user.Name, + UID: string(user.UID), + Groups: user.Spec.Groups, }, nil } diff --git a/pkg/models/tenant/devops.go b/pkg/models/tenant/devops.go index dc235e7cb..3c3f92a0f 100644 --- a/pkg/models/tenant/devops.go +++ b/pkg/models/tenant/devops.go @@ -18,6 +18,7 @@ package tenant import ( "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -66,7 +67,7 @@ func (t *tenantOperator) ListDevOpsProjects(user user.Info, workspace string, qu return result, nil } - roleBindings, err := t.am.ListRoleBindings(user.GetName(), "") + roleBindings, err := t.am.ListRoleBindings(user.GetName(), user.GetGroups(), "") if err != nil { klog.Error(err) return nil, err diff --git a/pkg/models/tenant/tenant.go b/pkg/models/tenant/tenant.go index aaba90f2f..55c5dfd7a 100644 --- a/pkg/models/tenant/tenant.go +++ b/pkg/models/tenant/tenant.go @@ -20,6 +20,9 @@ import ( "encoding/json" "fmt" "io" + "strings" + "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,8 +56,6 @@ import ( eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events" loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging" "kubesphere.io/kubesphere/pkg/utils/stringutils" - "strings" - "time" ) type Interface interface { @@ -134,7 +135,7 @@ func (t *tenantOperator) ListWorkspaces(user user.Info, queryParam *query.Query) } // retrieving associated resources through role binding - workspaceRoleBindings, err := t.am.ListWorkspaceRoleBindings(user.GetName(), "") + workspaceRoleBindings, err := t.am.ListWorkspaceRoleBindings(user.GetName(), user.GetGroups(), "") if err != nil { klog.Error(err) return nil, err @@ -205,7 +206,7 @@ func (t *tenantOperator) ListFederatedNamespaces(user user.Info, workspace strin } // retrieving associated resources through role binding - roleBindings, err := t.am.ListRoleBindings(user.GetName(), "") + roleBindings, err := 
t.am.ListRoleBindings(user.GetName(), user.GetGroups(), "") if err != nil { klog.Error(err) return nil, err @@ -273,7 +274,7 @@ func (t *tenantOperator) ListNamespaces(user user.Info, workspace string, queryP } // retrieving associated resources through role binding - roleBindings, err := t.am.ListRoleBindings(user.GetName(), "") + roleBindings, err := t.am.ListRoleBindings(user.GetName(), user.GetGroups(), "") if err != nil { klog.Error(err) return nil, err @@ -472,7 +473,7 @@ func (t *tenantOperator) ListClusters(user user.Info) (*api.ListResult, error) { return result, nil } - workspaceRoleBindings, err := t.am.ListWorkspaceRoleBindings(user.GetName(), "") + workspaceRoleBindings, err := t.am.ListWorkspaceRoleBindings(user.GetName(), user.GetGroups(), "") if err != nil { klog.Error(err) From fc5235ae2969b70becca73eb8cead9b2cde4d547 Mon Sep 17 00:00:00 2001 From: "Roland.Ma" Date: Fri, 6 Nov 2020 01:43:27 +0000 Subject: [PATCH 3/5] refine groupbinding controller Signed-off-by: Roland.Ma --- .../groupbinding/groupbinding_controller.go | 98 +++++++++---------- 1 file changed, 48 insertions(+), 50 deletions(-) diff --git a/pkg/controller/groupbinding/groupbinding_controller.go b/pkg/controller/groupbinding/groupbinding_controller.go index 4f7a7776c..568ba122f 100644 --- a/pkg/controller/groupbinding/groupbinding_controller.go +++ b/pkg/controller/groupbinding/groupbinding_controller.go @@ -226,7 +226,7 @@ func (c *Controller) reconcile(key string) error { } else { // The object is being deleted if sliceutil.HasString(groupBinding.ObjectMeta.Finalizers, finalizer) { - if err = c.bindUser(groupBinding); err != nil { + if err = c.unbindUser(groupBinding); err != nil { klog.Error(err) return err } @@ -257,67 +257,65 @@ func (c *Controller) Start(stopCh <-chan struct{}) error { return c.Run(4, stopCh) } -// Udpate user's Group property. So no need to query user's groups when authorizing. 
-func (c *Controller) bindUser(groupBinding *iamv1alpha2.GroupBinding) error { - - users := make([]string, 0) - // Ignore the user if the user if being deleted. - for _, u := range groupBinding.Users { - if user, err := c.ksClient.IamV1alpha2().Users().Get(u, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() { - users = append(users, u) +func (c *Controller) unbindUser(groupBinding *iamv1alpha2.GroupBinding) error { + return c.updateUserGroups(groupBinding, func(groups []string, group string) (bool, []string) { + // remove a group from the groups + if sliceutil.HasString(groups, group) { + groups := sliceutil.RemoveString(groups, func(item string) bool { + return item == group + }) + return true, groups } - } + return false, groups + }) +} - // Nothing to do - if len(users) == 0 { - return nil - } +func (c *Controller) bindUser(groupBinding *iamv1alpha2.GroupBinding) error { + return c.updateUserGroups(groupBinding, func(groups []string, group string) (bool, []string) { + // add group to the groups + if !sliceutil.HasString(groups, group) { + groups := append(groups, group) + return true, groups + } + return false, groups + }) +} - // Get all GroupBindings and check whether user exists in the Group. - listOptions := metav1.ListOptions{} - groupBindingList, err := c.ksClient.IamV1alpha2().GroupBindings().List(listOptions) - if err != nil { - klog.Error(err) - return err - } +// Udpate user's Group property. So no need to query user's groups when authorizing. 
+func (c *Controller) updateUserGroups(groupBinding *iamv1alpha2.GroupBinding, operator func(groups []string, group string) (bool, []string)) error { - userGroups := make(map[string][]string) - for _, item := range groupBindingList.Items { - if item.ObjectMeta.DeletionTimestamp.IsZero() { - for _, u := range users { - if sliceutil.HasString(item.Users, u) { - if userGroups[u] == nil { - userGroups[u] = make([]string, 0) + for _, u := range groupBinding.Users { + // Ignore the user if the user if being deleted. + if user, err := c.ksClient.IamV1alpha2().Users().Get(u, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() { + + if errors.IsNotFound(err) { + klog.Infof("user %s doesn't exist any more", u) + continue + } + + if changed, groups := operator(user.Spec.Groups, groupBinding.GroupRef.Name); changed { + + if err := c.patchUser(user, groups); err != nil { + if errors.IsNotFound(err) { + klog.Infof("user %s doesn't exist any more", u) + continue } - userGroups[u] = append(userGroups[u], item.GroupRef.Name) + klog.Error(err) + return err } } } } - for k, v := range userGroups { - if err := c.patchUser(k, v); err != nil { - if errors.IsNotFound(err) { - klog.Infof("user %s doesn't exist any more", k) - return nil - } - klog.Error(err) - return err - } - } return nil } -func (c *Controller) patchUser(userName string, groups []string) error { - if user, err := c.ksClient.IamV1alpha2().Users().Get(userName, metav1.GetOptions{}); err == nil && user.ObjectMeta.DeletionTimestamp.IsZero() { - newUser := user.DeepCopy() - newUser.Spec.Groups = groups - patch := client.MergeFrom(user) - patchData, _ := patch.Data(newUser) - if _, err := c.ksClient.IamV1alpha2().Users(). 
- Patch(userName, patch.Type(), patchData); err != nil { - return err - } - } else { +func (c *Controller) patchUser(user *iamv1alpha2.User, groups []string) error { + newUser := user.DeepCopy() + newUser.Spec.Groups = groups + patch := client.MergeFrom(user) + patchData, _ := patch.Data(newUser) + if _, err := c.ksClient.IamV1alpha2().Users(). + Patch(user.Name, patch.Type(), patchData); err != nil { return err } return nil From 5500e93b5f7d11abe0acada7249a411c0c1fbb20 Mon Sep 17 00:00:00 2001 From: "Roland.Ma" Date: Mon, 9 Nov 2020 01:50:11 +0000 Subject: [PATCH 4/5] update comments Signed-off-by: Roland.Ma --- pkg/controller/group/group_controller.go | 58 ++----------------- .../groupbinding/groupbinding_controller.go | 56 ++---------------- 2 files changed, 11 insertions(+), 103 deletions(-) diff --git a/pkg/controller/group/group_controller.go b/pkg/controller/group/group_controller.go index 02886be4f..49fa942dd 100644 --- a/pkg/controller/group/group_controller.go +++ b/pkg/controller/group/group_controller.go @@ -42,9 +42,7 @@ import ( ) const ( - // SuccessSynced is used as part of the Event 'reason' when a Foo is synced - successSynced = "Synced" - // is synced successfully + successSynced = "Synced" messageResourceSynced = "Group synced successfully" controllerName = "groupbinding-controller" finalizer = "finalizers.kubesphere.io/groups" @@ -57,22 +55,12 @@ type Controller struct { groupInformer iamv1alpha2informers.GroupInformer groupLister iamv1alpha1listers.GroupLister groupSynced cache.InformerSynced - // workqueue is a rate limited work queue. This is used to queue work to be - // processed instead of performing it as soon as a change happens. This - // means we can ensure we only process a fixed amount of resources at a - // time, and makes it easy to ensure we are never processing the same item - // simultaneously in two different workers. 
- workqueue workqueue.RateLimitingInterface - // recorder is an event recorder for recording Event resources to the - // Kubernetes API. - recorder record.EventRecorder + workqueue workqueue.RateLimitingInterface + recorder record.EventRecorder } // NewController creates Group Controller instance func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, groupInformer iamv1alpha2informers.GroupInformer) *Controller { - // Create event broadcaster - // Add sample-controller types to the default Kubernetes Scheme so Events can be - // logged for sample-controller types. klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() @@ -103,20 +91,14 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - // Start the informer factories to begin populating the informer caches klog.Info("Starting Group controller") - - // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") - synced := []cache.InformerSynced{c.groupSynced} - if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.Info("Starting workers") - // Launch two workers to process Foo resources for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) } @@ -148,40 +130,20 @@ func (c *Controller) processNextWorkItem() bool { if shutdown { return false } - - // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { - // We call Done here so the workqueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the workqueue and attempted again after a back-off - // period. 
defer c.workqueue.Done(obj) var key string var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. + if key, ok = obj.(string); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } - // Run the reconcile, passing it the namespace/name string of the - // Foo resource to be synced. if err := c.reconcile(key); err != nil { - // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(key) return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) } - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. c.workqueue.Forget(obj) klog.Infof("Successfully synced %s:%s", "key", key) return nil @@ -195,15 +157,11 @@ func (c *Controller) processNextWorkItem() bool { return true } -// syncHandler compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Foo resource -// with the current status of the resource. +// reconcile handles Group informer events, clears up related resources when group is being deleted. func (c *Controller) reconcile(key string) error { group, err := c.groupLister.Get(key) if err != nil { - // The user may no longer exist, in which case we stop - // processing.
if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("group '%s' in work queue no longer exists", key)) return nil @@ -212,8 +170,6 @@ func (c *Controller) reconcile(key string) error { return err } if group.ObjectMeta.DeletionTimestamp.IsZero() { - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. if !sliceutil.HasString(group.Finalizers, finalizer) { group.ObjectMeta.Finalizers = append(group.ObjectMeta.Finalizers, finalizer) @@ -236,7 +192,6 @@ func (c *Controller) reconcile(key string) error { return err } - // remove our finalizer from the list and update it. group.Finalizers = sliceutil.RemoveString(group.ObjectMeta.Finalizers, func(item string) bool { return item == finalizer }) @@ -245,7 +200,6 @@ func (c *Controller) reconcile(key string) error { return err } } - // Our finalizer has finished, so the reconciler can do nothing. return nil } @@ -254,7 +208,7 @@ func (c *Controller) reconcile(key string) error { } func (c *Controller) Start(stopCh <-chan struct{}) error { - return c.Run(4, stopCh) + return c.Run(1, stopCh) } func (c *Controller) deleteGroupBindings(group *iam1alpha2.Group) error { diff --git a/pkg/controller/groupbinding/groupbinding_controller.go b/pkg/controller/groupbinding/groupbinding_controller.go index 568ba122f..0cc050db9 100644 --- a/pkg/controller/groupbinding/groupbinding_controller.go +++ b/pkg/controller/groupbinding/groupbinding_controller.go @@ -42,9 +42,7 @@ import ( ) const ( - // SuccessSynced is used as part of the Event 'reason' when a Foo is synced - successSynced = "Synced" - // is synced successfully + successSynced = "Synced" messageResourceSynced = "GroupBinding synced successfully" controllerName = "groupbinding-controller" finalizer = "finalizers.kubesphere.io/groupsbindings" @@ -57,23 +55,12 @@ type Controller struct { groupBindingInformer iamv1alpha2informers.GroupBindingInformer groupBindingLister 
iamv1alpha2listers.GroupBindingLister groupBindingSynced cache.InformerSynced - // workqueue is a rate limited work queue. This is used to queue work to be - // processed instead of performing it as soon as a change happens. This - // means we can ensure we only process a fixed amount of resources at a - // time, and makes it easy to ensure we are never processing the same item - // simultaneously in two different workers. - workqueue workqueue.RateLimitingInterface - // recorder is an event recorder for recording Event resources to the - // Kubernetes API. - recorder record.EventRecorder + workqueue workqueue.RateLimitingInterface + recorder record.EventRecorder } // NewController creates GroupBinding Controller instance func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, groupBindingInformer iamv1alpha2informers.GroupBindingInformer) *Controller { - // Create event broadcaster - // Add sample-controller types to the default Kubernetes Scheme so Events can be - // logged for sample-controller types. 
- klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) @@ -103,10 +90,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - // Start the informer factories to begin populating the informer caches klog.Info("Starting GroupBinding controller") - - // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") synced := []cache.InformerSynced{c.groupBindingSynced} @@ -116,7 +100,6 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { } klog.Info("Starting workers") - // Launch two workers to process Foo resources for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) } @@ -149,39 +132,19 @@ func (c *Controller) processNextWorkItem() bool { return false } - // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { - // We call Done here so the workqueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the workqueue and attempted again after a back-off - // period. defer c.workqueue.Done(obj) var key string var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. if key, ok = obj.(string); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. 
c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } - // Run the reconcile, passing it the namespace/name string of the - // Foo resource to be synced. if err := c.reconcile(key); err != nil { - // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(key) return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) } - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. c.workqueue.Forget(obj) klog.Infof("Successfully synced %s:%s", "key", key) return nil @@ -195,15 +158,11 @@ func (c *Controller) processNextWorkItem() bool { return true } -// syncHandler compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Foo resource -// with the current status of the resource. +// reconcile handles GroupBinding informer events, it updates user's Groups property with the current GroupBinding. func (c *Controller) reconcile(key string) error { groupBinding, err := c.groupBindingLister.Get(key) if err != nil { - // The user may no longer exist, in which case we stop - // processing. if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("groupbinding '%s' in work queue no longer exists", key)) return nil @@ -212,9 +171,6 @@ func (c *Controller) reconcile(key string) error { return err } if groupBinding.ObjectMeta.DeletionTimestamp.IsZero() { - - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. 
if !sliceutil.HasString(groupBinding.Finalizers, finalizer) { groupBinding.ObjectMeta.Finalizers = append(groupBinding.ObjectMeta.Finalizers, finalizer) if groupBinding, err = c.ksClient.IamV1alpha2().GroupBindings().Update(groupBinding); err != nil { @@ -231,7 +187,6 @@ func (c *Controller) reconcile(key string) error { return err } - // remove our finalizer from the list and update it. groupBinding.Finalizers = sliceutil.RemoveString(groupBinding.ObjectMeta.Finalizers, func(item string) bool { return item == finalizer }) @@ -240,7 +195,6 @@ func (c *Controller) reconcile(key string) error { return err } } - // Our finalizer has finished, so the reconciler can do nothing. return nil } @@ -254,7 +208,7 @@ func (c *Controller) reconcile(key string) error { } func (c *Controller) Start(stopCh <-chan struct{}) error { - return c.Run(4, stopCh) + return c.Run(2, stopCh) } func (c *Controller) unbindUser(groupBinding *iamv1alpha2.GroupBinding) error { From 0a5e04d186455890db34504a783d4b8d270f2ac1 Mon Sep 17 00:00:00 2001 From: "Roland.Ma" Date: Wed, 11 Nov 2020 05:19:09 +0000 Subject: [PATCH 5/5] Add UT Signed-off-by: Roland.Ma --- pkg/apis/iam/v1alpha2/group_test.go | 56 +++ pkg/apis/iam/v1alpha2/groupbinding_test.go | 59 +++ pkg/controller/group/group_controller.go | 1 + pkg/controller/group/group_controller_test.go | 13 +- .../groupbinding_controller_test.go | 338 ++++++++++++++++++ 5 files changed, 466 insertions(+), 1 deletion(-) create mode 100644 pkg/apis/iam/v1alpha2/group_test.go create mode 100644 pkg/apis/iam/v1alpha2/groupbinding_test.go create mode 100644 pkg/controller/groupbinding/groupbinding_controller_test.go diff --git a/pkg/apis/iam/v1alpha2/group_test.go b/pkg/apis/iam/v1alpha2/group_test.go new file mode 100644 index 000000000..d41462893 --- /dev/null +++ b/pkg/apis/iam/v1alpha2/group_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The KubeSphere Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "testing" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestStorageGroup(t *testing.T) { + key := types.NamespacedName{ + Name: "foo", + } + created := &Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }} + g := gomega.NewGomegaWithT(t) + + // Test Create + fetched := &Group{} + g.Expect(c.Create(context.TODO(), created)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(created)) + + // Test Updating the Labels + updated := fetched.DeepCopy() + updated.Labels = map[string]string{"hello": "world"} + g.Expect(c.Update(context.TODO(), updated)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(updated)) + + // Test Delete + g.Expect(c.Delete(context.TODO(), fetched)).To(gomega.Succeed()) + g.Expect(c.Get(context.TODO(), key, fetched)).ToNot(gomega.Succeed()) +} diff --git a/pkg/apis/iam/v1alpha2/groupbinding_test.go b/pkg/apis/iam/v1alpha2/groupbinding_test.go new file mode 100644 index 000000000..c5789ab0e --- /dev/null +++ b/pkg/apis/iam/v1alpha2/groupbinding_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2020 The KubeSphere Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "testing" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestStorageGroupBinding(t *testing.T) { + key := types.NamespacedName{ + Name: "foo", + } + created := &GroupBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + GroupRef: GroupRef{Name: "bar"}, + Users: []string{"user"}, + } + g := gomega.NewGomegaWithT(t) + + // Test Create + fetched := &GroupBinding{} + g.Expect(c.Create(context.TODO(), created)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(created)) + + // Test Updating the Labels + updated := fetched.DeepCopy() + updated.Labels = map[string]string{"hello": "world"} + g.Expect(c.Update(context.TODO(), updated)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(updated)) + + // Test Delete + g.Expect(c.Delete(context.TODO(), fetched)).To(gomega.Succeed()) + g.Expect(c.Get(context.TODO(), key, fetched)).ToNot(gomega.Succeed()) +} diff --git a/pkg/controller/group/group_controller.go b/pkg/controller/group/group_controller.go index 49fa942dd..03c4e4694 100644 --- a/pkg/controller/group/group_controller.go +++ b/pkg/controller/group/group_controller.go @@ -227,6 +227,7 @@ func (c *Controller) deleteGroupBindings(group 
*iam1alpha2.Group) error { return nil } +// remove all RoleBindings. func (c *Controller) deleteRoleBindings(group *iam1alpha2.Group) error { listOptions := metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{iam1alpha2.GroupReferenceLabel: group.Name}).String(), diff --git a/pkg/controller/group/group_controller_test.go b/pkg/controller/group/group_controller_test.go index d7edc7a5b..1935005cd 100644 --- a/pkg/controller/group/group_controller_test.go +++ b/pkg/controller/group/group_controller_test.go @@ -238,7 +238,7 @@ func getKey(group *v1alpha2.Group, t *testing.T) string { return key } -func TestDoNothing(t *testing.T) { +func TestDeletesGroup(t *testing.T) { f := newFixture(t) group := newGroup("test") @@ -260,3 +260,14 @@ func TestDoNothing(t *testing.T) { f.expectUpdateGroupsDeleteAction(deletedGroup) f.run(getKey(deletedGroup, t)) } + +func TestDoNothing(t *testing.T) { + f := newFixture(t) + group := newGroup("test") + + f.groupLister = append(f.groupLister, group) + f.objects = append(f.objects, group) + + f.expectUpdateGroupsFinalizerAction(group) + f.run(getKey(group, t)) +} diff --git a/pkg/controller/groupbinding/groupbinding_controller_test.go b/pkg/controller/groupbinding/groupbinding_controller_test.go new file mode 100644 index 000000000..699849a0f --- /dev/null +++ b/pkg/controller/groupbinding/groupbinding_controller_test.go @@ -0,0 +1,338 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package groupbinding + +import ( + "fmt" + "reflect" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + kubeinformers "k8s.io/client-go/informers" + k8sfake "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake" + ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + alwaysReady = func() bool { return true } + noResyncPeriodFunc = func() time.Duration { return 0 } +) + +type fixture struct { + t *testing.T + + ksclient *fake.Clientset + k8sclient *k8sfake.Clientset + // Objects to put in the store. + groupBindingLister []*v1alpha2.GroupBinding + userLister []*v1alpha2.User + // Actions expected to happen on the client. + kubeactions []core.Action + actions []core.Action + // Objects from here preloaded into NewSimpleFake. 
+ kubeobjects []runtime.Object + objects []runtime.Object +} + +func newFixture(t *testing.T) *fixture { + f := &fixture{} + f.t = t + f.objects = []runtime.Object{} + f.kubeobjects = []runtime.Object{} + return f +} + +func newGroupBinding(name string, users []string) *v1alpha2.GroupBinding { + return &v1alpha2.GroupBinding{ + TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-binding", name), + }, + GroupRef: v1alpha2.GroupRef{ + Name: name, + }, + Users: users, + } +} + +func newUser(name string) *v1alpha2.User { + return &v1alpha2.User{ + TypeMeta: metav1.TypeMeta{APIVersion: v1alpha2.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha2.UserSpec{ + Email: fmt.Sprintf("%s@kubesphere.io", name), + Lang: "zh-CN", + Description: "fake user", + }, + } +} + +func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) { + f.ksclient = fake.NewSimpleClientset(f.objects...) + f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
+ + ksinformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc()) + k8sinformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc()) + + for _, groupBinding := range f.groupBindingLister { + err := ksinformers.Iam().V1alpha2().GroupBindings().Informer().GetIndexer().Add(groupBinding) + if err != nil { + f.t.Errorf("add groupBinding:%s", err) + } + } + + for _, u := range f.userLister { + err := ksinformers.Iam().V1alpha2().Users().Informer().GetIndexer().Add(u) + if err != nil { + f.t.Errorf("add groupBinding:%s", err) + } + } + + c := NewController(f.k8sclient, f.ksclient, + ksinformers.Iam().V1alpha2().GroupBindings()) + c.groupBindingSynced = alwaysReady + c.recorder = &record.FakeRecorder{} + + return c, ksinformers, k8sinformers +} + +func (f *fixture) run(userName string) { + f.runController(userName, true, false) +} + +func (f *fixture) runExpectError(userName string) { + f.runController(userName, true, true) +} + +func (f *fixture) runController(groupBinding string, startInformers bool, expectError bool) { + c, i, k8sI := f.newController() + if startInformers { + stopCh := make(chan struct{}) + defer close(stopCh) + i.Start(stopCh) + k8sI.Start(stopCh) + } + + err := c.reconcile(groupBinding) + if !expectError && err != nil { + f.t.Errorf("error syncing groupBinding: %v", err) + } else if expectError && err == nil { + f.t.Error("expected error syncing groupBinding, got nil") + } + + actions := filterInformerActions(f.ksclient.Actions()) + for i, action := range actions { + if len(f.actions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:]) + break + } + + expectedAction := f.actions[i] + checkAction(expectedAction, action, f.t) + } + + if len(f.actions) > len(actions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):]) + } + + k8sActions := filterInformerActions(f.k8sclient.Actions()) + for i, action := range 
k8sActions { + if len(f.kubeactions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:]) + break + } + + expectedAction := f.kubeactions[i] + checkAction(expectedAction, action, f.t) + } + + if len(f.kubeactions) > len(k8sActions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):]) + } +} + +// checkAction verifies that expected and actual actions are equal and both have +// same attached resources +func checkAction(expected, actual core.Action, t *testing.T) { + if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) { + t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual) + return + } + + if reflect.TypeOf(actual) != reflect.TypeOf(expected) { + t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual) + return + } + + switch a := actual.(type) { + case core.CreateActionImpl: + e, _ := expected.(core.CreateActionImpl) + expObject := e.GetObject() + object := a.GetObject() + + if !reflect.DeepEqual(expObject, object) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object)) + } + case core.UpdateActionImpl: + e, _ := expected.(core.UpdateActionImpl) + expObject := e.GetObject() + object := a.GetObject() + expUser := expObject.(*v1alpha2.GroupBinding) + groupBinding := object.(*v1alpha2.GroupBinding) + + if !reflect.DeepEqual(expUser, groupBinding) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object)) + } + case core.PatchActionImpl: + e, _ := expected.(core.PatchActionImpl) + expPatch := e.GetPatch() + patch := a.GetPatch() + + if !reflect.DeepEqual(expPatch, patch) { + t.Errorf("Action %s %s has wrong patch\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, 
diff.ObjectGoPrintSideBySide(expPatch, patch)) + } + default: + t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it", + actual.GetVerb(), actual.GetResource().Resource) + } +} + +// filterInformerActions filters list and watch actions for testing resources. +// Since list and watch don't change resource state we can filter it to lower +// noise level in our tests. +func filterInformerActions(actions []core.Action) []core.Action { + var ret []core.Action + for _, action := range actions { + if len(action.GetNamespace()) == 0 && + (action.Matches("list", "groupbindings") || + action.Matches("watch", "groupbindings") || + action.Matches("list", "users") || + action.Matches("watch", "users") || + action.Matches("get", "users")) { + continue + } + ret = append(ret, action) + } + + return ret +} + +func (f *fixture) expectUpdateGroupsFinalizerAction(groupBinding *v1alpha2.GroupBinding) { + expect := groupBinding.DeepCopy() + expect.Finalizers = []string{"finalizers.kubesphere.io/groupsbindings"} + action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect) + f.actions = append(f.actions, action) +} + +func (f *fixture) expectUpdateGroupsDeleteAction(groupBinding *v1alpha2.GroupBinding) { + expect := groupBinding.DeepCopy() + expect.Finalizers = []string{} + action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect) + f.actions = append(f.actions, action) +} + +func (f *fixture) expectPatchUserAction(user *v1alpha2.User, groups []string) { + newUser := user.DeepCopy() + newUser.Spec.Groups = groups + patch := client.MergeFrom(user) + patchData, _ := patch.Data(newUser) + + f.actions = append(f.actions, core.NewPatchAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Resource: "users", Version: "v1alpha2"}, user.Namespace, user.Name, patch.Type(), patchData))
+} + +func getKey(groupBinding *v1alpha2.GroupBinding, t *testing.T) string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(groupBinding) + if err != nil { + t.Errorf("Unexpected error getting key for groupBinding %v: %v", groupBinding.Name, err) + return "" + } + return key +} + +func TestCreatesGroupBinding(t *testing.T) { + f := newFixture(t) + + users := []string{"user1"} + groupbinding := newGroupBinding("test", users) + groupbinding.ObjectMeta.Finalizers = append(groupbinding.ObjectMeta.Finalizers, finalizer) + f.groupBindingLister = append(f.groupBindingLister, groupbinding) + f.objects = append(f.objects, groupbinding) + + user := newUser("user1") + f.userLister = append(f.userLister, user) + + f.objects = append(f.objects, user) + + excepctGroups := []string{"test"} + f.expectPatchUserAction(user, excepctGroups) + + f.run(getKey(groupbinding, t)) +} + +func TestDeletesGroupBinding(t *testing.T) { + f := newFixture(t) + + users := []string{"user1"} + groupbinding := newGroupBinding("test", users) + deletedGroup := groupbinding.DeepCopy() + deletedGroup.Finalizers = append(groupbinding.ObjectMeta.Finalizers, finalizer) + + now := metav1.Now() + deletedGroup.ObjectMeta.DeletionTimestamp = &now + + f.groupBindingLister = append(f.groupBindingLister, deletedGroup) + f.objects = append(f.objects, deletedGroup) + + user := newUser("user1") + user.Spec.Groups = []string{"test"} + f.userLister = append(f.userLister, user) + f.objects = append(f.objects, user) + + f.expectPatchUserAction(user, nil) + f.expectUpdateGroupsDeleteAction(deletedGroup) + + f.run(getKey(deletedGroup, t)) +} + +func TestDoNothing(t *testing.T) { + f := newFixture(t) + users := []string{"user1"} + groupBinding := newGroupBinding("test", users) + + f.groupBindingLister = append(f.groupBindingLister, groupBinding) + f.objects = append(f.objects, groupBinding) + + f.expectUpdateGroupsFinalizerAction(groupBinding) + f.run(getKey(groupBinding, t)) + +}