Merge branch 'dev' into devops-refactor
# Conflicts:
#    cmd/controller-manager/app/controllers.go
#    hack/generate_client.sh
#    pkg/client/clientset/versioned/clientset.go
#    pkg/client/clientset/versioned/fake/clientset_generated.go
#    pkg/client/clientset/versioned/fake/register.go
#    pkg/client/clientset/versioned/scheme/register.go
#    pkg/client/informers/externalversions/generic.go
@@ -24,7 +24,6 @@ import (
    "github.com/golang/protobuf/ptypes/wrappers"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
@@ -44,22 +43,6 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/source"
)

const (
    adminDescription    = "Allows admin access to perform any action on any resource, it gives full control over every resource in the namespace."
    operatorDescription = "The maintainer of the namespace who can manage resources other than users and roles in the namespace."
    viewerDescription   = "Allows viewer access to view all resources in the namespace."
)

var (
    admin = rbac.Role{ObjectMeta: metav1.ObjectMeta{Name: "admin", Annotations: map[string]string{constants.DescriptionAnnotationKey: adminDescription, constants.CreatorAnnotationKey: constants.System}}, Rules: []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}
    operator = rbac.Role{ObjectMeta: metav1.ObjectMeta{Name: "operator", Annotations: map[string]string{constants.DescriptionAnnotationKey: operatorDescription, constants.CreatorAnnotationKey: constants.System}}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}},
        {Verbs: []string{"*"}, APIGroups: []string{"apps", "extensions", "batch", "logging.kubesphere.io", "monitoring.kubesphere.io", "iam.kubesphere.io", "autoscaling", "alerting.kubesphere.io", "openpitrix.io", "app.k8s.io", "servicemesh.kubesphere.io", "operations.kubesphere.io", "devops.kubesphere.io"}, Resources: []string{"*"}},
        {Verbs: []string{"*"}, APIGroups: []string{"", "resources.kubesphere.io"}, Resources: []string{"jobs", "cronjobs", "daemonsets", "deployments", "horizontalpodautoscalers", "ingresses", "endpoints", "configmaps", "events", "persistentvolumeclaims", "pods", "podtemplates", "pods", "secrets", "services"}},
    }}
    viewer = rbac.Role{ObjectMeta: metav1.ObjectMeta{Name: "viewer", Annotations: map[string]string{constants.DescriptionAnnotationKey: viewerDescription, constants.CreatorAnnotationKey: constants.System}}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}

    defaultRoles = []rbac.Role{admin, operator, viewer}
)

/**
 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
 * business logic. Delete these comments after modifying this file.
 */
@@ -164,19 +147,6 @@ func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Res
        return reconcile.Result{}, nil
    }

    controlledByWorkspace, err := r.isControlledByWorkspace(instance)

    if err != nil {
        return reconcile.Result{}, err
    }

    if !controlledByWorkspace {
        err = r.deleteRoleBindings(instance)
        return reconcile.Result{}, err
    }

    if err = r.checkAndBindWorkspace(instance); err != nil {
        return reconcile.Result{}, err
    }
@@ -357,24 +327,3 @@ func (r *ReconcileNamespace) deleteRouter(namespace string) error {
    return nil
}

func (r *ReconcileNamespace) deleteRoleBindings(namespace *corev1.Namespace) error {
    klog.V(4).Info("deleting role bindings namespace: ", namespace.Name)
    adminBinding := &rbac.RoleBinding{}
    adminBinding.Name = admin.Name
    adminBinding.Namespace = namespace.Name
    err := r.Delete(context.TODO(), adminBinding)
    if err != nil && !errors.IsNotFound(err) {
        klog.Errorf("deleting role binding namespace: %s, role binding: %s,error: %s", namespace.Name, adminBinding.Name, err)
        return err
    }
    viewerBinding := &rbac.RoleBinding{}
    viewerBinding.Name = viewer.Name
    viewerBinding.Namespace = namespace.Name
    err = r.Delete(context.TODO(), viewerBinding)
    if err != nil && !errors.IsNotFound(err) {
        klog.Errorf("deleting role binding namespace: %s,role binding: %s,error: %s", namespace.Name, viewerBinding.Name, err)
        return err
    }
    return nil
}
pkg/controller/user/user_controller.go (new file, +230)
@@ -0,0 +1,230 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package user

import (
    "fmt"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
    userinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
    userlister "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
    "time"
)

const (
    // successSynced is used as part of the Event 'reason' when a User is synced
    successSynced = "Synced"
    // messageResourceSynced is the message used for an Event fired when a User
    // is synced successfully
    messageResourceSynced = "User synced successfully"
    controllerName        = "user-controller"
)

type Controller struct {
    kubeClientset       kubernetes.Interface
    kubesphereClientset kubesphereclient.Interface

    userInformer userinformer.UserInformer
    userLister   userlister.UserLister
    userSynced   cache.InformerSynced
    // workqueue is a rate limited work queue. This is used to queue work to be
    // processed instead of performing it as soon as a change happens. This
    // means we can ensure we only process a fixed amount of resources at a
    // time, and makes it easy to ensure we are never processing the same item
    // simultaneously in two different workers.
    workqueue workqueue.RateLimitingInterface
    // recorder is an event recorder for recording Event resources to the
    // Kubernetes API.
    recorder record.EventRecorder
}

func NewController(kubeclientset kubernetes.Interface,
    kubesphereklientset kubesphereclient.Interface,
    userInformer userinformer.UserInformer) *Controller {
    // Create event broadcaster
    // Add KubeSphere types to the default Kubernetes Scheme so Events can be
    // logged for KubeSphere types.
    utilruntime.Must(kubespherescheme.AddToScheme(scheme.Scheme))
    klog.V(4).Info("Creating event broadcaster")
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
    ctl := &Controller{
        kubeClientset:       kubeclientset,
        kubesphereClientset: kubesphereklientset,
        userInformer:        userInformer,
        userLister:          userInformer.Lister(),
        userSynced:          userInformer.Informer().HasSynced,
        workqueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
        recorder:            recorder,
    }
    klog.Info("Setting up event handlers")
    userInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: ctl.enqueueUser,
        UpdateFunc: func(old, new interface{}) {
            ctl.enqueueUser(new)
        },
        DeleteFunc: ctl.enqueueUser,
    })
    return ctl
}

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    // init client

    // Start the informer factories to begin populating the informer caches
    klog.Info("Starting User controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")
    if ok := cache.WaitForCacheSync(stopCh, c.userSynced); !ok {
        return fmt.Errorf("failed to wait for caches to sync")
    }

    klog.Info("Starting workers")
    // Launch workers to process User resources
    for i := 0; i < threadiness; i++ {
        go wait.Until(c.runWorker, time.Second, stopCh)
    }

    klog.Info("Started workers")
    <-stopCh
    klog.Info("Shutting down workers")
    return nil
}

func (c *Controller) enqueueUser(obj interface{}) {
    var key string
    var err error
    if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
        utilruntime.HandleError(err)
        return
    }
    c.workqueue.Add(key)
}

func (c *Controller) runWorker() {
    for c.processNextWorkItem() {
    }
}

func (c *Controller) processNextWorkItem() bool {
    obj, shutdown := c.workqueue.Get()

    if shutdown {
        return false
    }

    // We wrap this block in a func so we can defer c.workqueue.Done.
    err := func(obj interface{}) error {
        // We call Done here so the workqueue knows we have finished
        // processing this item. We also must remember to call Forget if we
        // do not want this work item being re-queued. For example, we do
        // not call Forget if a transient error occurs, instead the item is
        // put back on the workqueue and attempted again after a back-off
        // period.
        defer c.workqueue.Done(obj)
        var key string
        var ok bool
        // We expect strings to come off the workqueue. These are of the
        // form namespace/name. We do this as the delayed nature of the
        // workqueue means the items in the informer cache may actually be
        // more up to date than when the item was initially put onto the
        // workqueue.
        if key, ok = obj.(string); !ok {
            // As the item in the workqueue is actually invalid, we call
            // Forget here else we'd go into a loop of attempting to
            // process a work item that is invalid.
            c.workqueue.Forget(obj)
            utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
            return nil
        }
        // Run the reconcile, passing it the key of the
        // User resource to be synced.
        if err := c.reconcile(key); err != nil {
            // Put the item back on the workqueue to handle any transient errors.
            c.workqueue.AddRateLimited(key)
            return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
        }
        // Finally, if no error occurs we Forget this item so it does not
        // get queued again until another change happens.
        c.workqueue.Forget(obj)
        klog.Infof("Successfully synced %s", key)
        return nil
    }(obj)

    if err != nil {
        utilruntime.HandleError(err)
        return true
    }

    return true
}

// reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the User resource
// with the current status of the resource.
func (c *Controller) reconcile(key string) error {
    // Get the user with this name
    user, err := c.userLister.Get(key)
    if err != nil {
        // The user may no longer exist, in which case we stop
        // processing.
        if errors.IsNotFound(err) {
            utilruntime.HandleError(fmt.Errorf("user '%s' in work queue no longer exists", key))
            return nil
        }
        return err
    }

    err = c.updateUserStatus(user)

    if err != nil {
        return err
    }

    c.recorder.Event(user, corev1.EventTypeNormal, successSynced, messageResourceSynced)
    return nil
}

func (c *Controller) updateUserStatus(user *iamv1alpha2.User) error {
    userCopy := user.DeepCopy()
    userCopy.Status.State = iamv1alpha2.UserActive
    _, err := c.kubesphereClientset.IamV1alpha2().Users().Update(userCopy)
    return err
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
    return c.Run(4, stopCh)
}
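For reference, a minimal wiring sketch (not part of this commit) showing how the controller above could be constructed and started in a standalone process. It only uses packages that already appear in this diff; the kubeconfig path and the main-package scaffolding are assumptions for illustration, and the actual controller-manager wiring in the repository may differ.

// Sketch only: wiring the user controller into a process.
package main

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"

    kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
    "kubesphere.io/kubesphere/pkg/controller/user"
)

func main() {
    // Hypothetical kubeconfig path; error handling kept minimal for brevity.
    config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
    if err != nil {
        panic(err)
    }

    k8sClient := kubernetes.NewForConfigOrDie(config)
    ksClient := kubesphereclient.NewForConfigOrDie(config)

    // Shared informer factory that feeds the User informer used by the controller.
    informerFactory := informers.NewSharedInformerFactory(ksClient, 0)

    c := user.NewController(k8sClient, ksClient, informerFactory.Iam().V1alpha2().Users())

    stopCh := make(chan struct{})
    defer close(stopCh)

    informerFactory.Start(stopCh)

    // Run blocks until stopCh is closed; 4 workers matches Controller.Start above.
    if err := c.Run(4, stopCh); err != nil {
        panic(err)
    }
}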
pkg/controller/user/user_controller_test.go (new file, +245)
@@ -0,0 +1,245 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package user

import (
    "fmt"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/diff"
    kubeinformers "k8s.io/client-go/informers"
    k8sfake "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
    "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
    informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
    "reflect"
    "testing"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
    alwaysReady        = func() bool { return true }
    noResyncPeriodFunc = func() time.Duration { return 0 }
)

type fixture struct {
    t *testing.T

    client     *fake.Clientset
    kubeclient *k8sfake.Clientset
    // Objects to put in the store.
    userLister []*iamv1alpha2.User
    // Actions expected to happen on the client.
    kubeactions []core.Action
    actions     []core.Action
    // Objects from here preloaded into NewSimpleFake.
    kubeobjects []runtime.Object
    objects     []runtime.Object
}

func newFixture(t *testing.T) *fixture {
    f := &fixture{}
    f.t = t
    f.objects = []runtime.Object{}
    f.kubeobjects = []runtime.Object{}
    return f
}

func newUser(name string) *iamv1alpha2.User {
    return &iamv1alpha2.User{
        TypeMeta: metav1.TypeMeta{APIVersion: iamv1alpha2.SchemeGroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Spec: iamv1alpha2.UserSpec{
            Email:       fmt.Sprintf("%s@kubesphere.io", name),
            Lang:        "zh-CN",
            Description: "fake user",
        },
    }
}

func (f *fixture) newController() (*Controller, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
    f.client = fake.NewSimpleClientset(f.objects...)
    f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)

    i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc())
    k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())

    for _, user := range f.userLister {
        err := i.Iam().V1alpha2().Users().Informer().GetIndexer().Add(user)
        if err != nil {
            f.t.Errorf("add user:%s", err)
        }
    }

    c := NewController(f.kubeclient, f.client, i.Iam().V1alpha2().Users())
    c.userSynced = alwaysReady
    c.recorder = &record.FakeRecorder{}

    return c, i, k8sI
}

func (f *fixture) run(userName string) {
    f.runController(userName, true, false)
}

func (f *fixture) runExpectError(userName string) {
    f.runController(userName, true, true)
}

func (f *fixture) runController(user string, startInformers bool, expectError bool) {
    c, i, k8sI := f.newController()
    if startInformers {
        stopCh := make(chan struct{})
        defer close(stopCh)
        i.Start(stopCh)
        k8sI.Start(stopCh)
    }

    err := c.reconcile(user)
    if !expectError && err != nil {
        f.t.Errorf("error syncing user: %v", err)
    } else if expectError && err == nil {
        f.t.Error("expected error syncing user, got nil")
    }

    actions := filterInformerActions(f.client.Actions())
    for i, action := range actions {
        if len(f.actions) < i+1 {
            f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
            break
        }

        expectedAction := f.actions[i]
        checkAction(expectedAction, action, f.t)
    }

    if len(f.actions) > len(actions) {
        f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
    }

    k8sActions := filterInformerActions(f.kubeclient.Actions())
    for i, action := range k8sActions {
        if len(f.kubeactions) < i+1 {
            f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
            break
        }

        expectedAction := f.kubeactions[i]
        checkAction(expectedAction, action, f.t)
    }

    if len(f.kubeactions) > len(k8sActions) {
        f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
    }
}

// checkAction verifies that expected and actual actions are equal and both have
// the same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
    if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
        t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
        return
    }

    if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
        t.Errorf("Action has wrong type. Expected: %T. Got: %T", expected, actual)
        return
    }

    switch a := actual.(type) {
    case core.CreateActionImpl:
        e, _ := expected.(core.CreateActionImpl)
        expObject := e.GetObject()
        object := a.GetObject()

        if !reflect.DeepEqual(expObject, object) {
            t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
                a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
        }
    case core.UpdateActionImpl:
        e, _ := expected.(core.UpdateActionImpl)
        expObject := e.GetObject()
        object := a.GetObject()

        if !reflect.DeepEqual(expObject, object) {
            t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
                a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
        }
    case core.PatchActionImpl:
        e, _ := expected.(core.PatchActionImpl)
        expPatch := e.GetPatch()
        patch := a.GetPatch()

        if !reflect.DeepEqual(expPatch, patch) {
            t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
                a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
        }
    default:
        t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
            actual.GetVerb(), actual.GetResource().Resource)
    }
}

// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state, we can filter them out to
// lower the noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
    var ret []core.Action
    for _, action := range actions {
        if action.Matches("list", "users") ||
            action.Matches("watch", "users") {
            continue
        }
        ret = append(ret, action)
    }

    return ret
}

func (f *fixture) expectUpdateUserStatusAction(user *iamv1alpha2.User) {
    expect := user.DeepCopy()
    expect.Status.State = iamv1alpha2.UserActive
    action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "users"}, "", expect)
    f.actions = append(f.actions, action)
}

func getKey(user *iamv1alpha2.User, t *testing.T) string {
    key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(user)
    if err != nil {
        t.Errorf("Unexpected error getting key for user %v: %v", user.Name, err)
        return ""
    }
    return key
}

func TestDoNothing(t *testing.T) {
    f := newFixture(t)
    user := newUser("test")

    f.userLister = append(f.userLister, user)
    f.objects = append(f.objects, user)

    f.expectUpdateUserStatusAction(user)
    f.run(getKey(user, t))
}
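For completeness, a hedged sketch (not part of this commit) of how the same fixture could cover the error path: it injects a failing reaction into the fake KubeSphere clientset and calls reconcile directly, so the status update fails. The test name and the injected error message are illustrative only.

// Sketch only: exercising reconcile's error path with a failing fake update.
func TestUpdateStatusError(t *testing.T) {
    f := newFixture(t)
    user := newUser("test")

    f.userLister = append(f.userLister, user)
    f.objects = append(f.objects, user)

    c, i, _ := f.newController()
    // Any update to users now returns an error from the fake client.
    f.client.PrependReactor("update", "users", func(action core.Action) (bool, runtime.Object, error) {
        return true, nil, fmt.Errorf("injected update failure")
    })

    stopCh := make(chan struct{})
    defer close(stopCh)
    i.Start(stopCh)

    if err := c.reconcile(getKey(user, t)); err == nil {
        t.Error("expected reconcile to fail when the status update fails")
    }
}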
@@ -20,30 +20,17 @@ package workspace

import (
    "context"
    "fmt"
    corev1 "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
    log "k8s.io/klog"
    tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/models"
    "kubesphere.io/kubesphere/pkg/simple/client/kubesphere"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    "sigs.k8s.io/controller-runtime/pkg/source"
    "sync"
)

const (
@@ -54,14 +41,14 @@ const (

// Add creates a new Workspace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager, kubesphereClient kubesphere.Interface) error {
    return add(mgr, newReconciler(mgr, kubesphereClient))
func Add(mgr manager.Manager) error {
    return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, kubesphereClient kubesphere.Interface) reconcile.Reconciler {
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
    return &ReconcileWorkspace{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
        recorder: mgr.GetEventRecorderFor("workspace-controller"), ksclient: kubesphereClient}
        recorder: mgr.GetEventRecorderFor("workspace-controller")}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
@@ -88,7 +75,6 @@ type ReconcileWorkspace struct {
    client.Client
    scheme   *runtime.Scheme
    recorder record.EventRecorder
    ksclient kubesphere.Interface
}

// Reconcile reads the state of the cluster for a Workspace object and makes changes based on the state read
@@ -125,9 +111,6 @@ func (r *ReconcileWorkspace) Reconcile(request reconcile.Request) (reconcile.Res
        // The object is being deleted
        if sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) {
            // our finalizer is present, so lets handle our external dependency
            if err := r.deleteDevOpsProjects(instance); err != nil {
                return reconcile.Result{}, err
            }

            // remove our finalizer from the list and update it.
            instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
@@ -142,497 +125,5 @@ func (r *ReconcileWorkspace) Reconcile(request reconcile.Request) (reconcile.Res
        return reconcile.Result{}, nil
    }

    if err = r.createWorkspaceAdmin(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.createWorkspaceRegular(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.createWorkspaceViewer(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.createWorkspaceRoleBindings(instance); err != nil {
        return reconcile.Result{}, err
    }

    if err = r.bindNamespaces(instance); err != nil {
        return reconcile.Result{}, err
    }

    return reconcile.Result{}, nil
}

func (r *ReconcileWorkspace) createWorkspaceAdmin(instance *tenantv1alpha1.Workspace) error {
    found := &rbac.ClusterRole{}

    admin := getWorkspaceAdmin(instance.Name)

    if err := controllerutil.SetControllerReference(instance, admin, r.scheme); err != nil {
        return err
    }

    err := r.Get(context.TODO(), types.NamespacedName{Name: admin.Name}, found)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role", "workspace", instance.Name, "name", admin.Name)
        err = r.Create(context.TODO(), admin)
        if err != nil {
            return err
        }
        found = admin
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(admin.Rules, found.Rules) || !reflect.DeepEqual(admin.Labels, found.Labels) || !reflect.DeepEqual(admin.Annotations, found.Annotations) {
        found.Rules = admin.Rules
        found.Labels = admin.Labels
        found.Annotations = admin.Annotations
        log.Info("Updating workspace role", "workspace", instance.Name, "name", admin.Name)
        err = r.Update(context.TODO(), found)
        if err != nil {
            return err
        }
    }
    return nil
}

func (r *ReconcileWorkspace) createWorkspaceRegular(instance *tenantv1alpha1.Workspace) error {
    found := &rbac.ClusterRole{}

    regular := getWorkspaceRegular(instance.Name)

    if err := controllerutil.SetControllerReference(instance, regular, r.scheme); err != nil {
        return err
    }

    err := r.Get(context.TODO(), types.NamespacedName{Name: regular.Name}, found)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role", "workspace", instance.Name, "name", regular.Name)
        err = r.Create(context.TODO(), regular)
        if err != nil {
            return err
        }
        found = regular
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(regular.Rules, found.Rules) || !reflect.DeepEqual(regular.Labels, found.Labels) || !reflect.DeepEqual(regular.Annotations, found.Annotations) {
        found.Rules = regular.Rules
        found.Labels = regular.Labels
        found.Annotations = regular.Annotations
        log.Info("Updating workspace role", "workspace", instance.Name, "name", regular.Name)
        err = r.Update(context.TODO(), found)
        if err != nil {
            return err
        }
    }

    return nil
}

func (r *ReconcileWorkspace) createWorkspaceViewer(instance *tenantv1alpha1.Workspace) error {
    found := &rbac.ClusterRole{}

    viewer := getWorkspaceViewer(instance.Name)

    if err := controllerutil.SetControllerReference(instance, viewer, r.scheme); err != nil {
        return err
    }

    err := r.Get(context.TODO(), types.NamespacedName{Name: viewer.Name}, found)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role", "workspace", instance.Name, "name", viewer.Name)
        err = r.Create(context.TODO(), viewer)
        if err != nil {
            return err
        }
        found = viewer
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(viewer.Rules, found.Rules) || !reflect.DeepEqual(viewer.Labels, found.Labels) || !reflect.DeepEqual(viewer.Annotations, found.Annotations) {
        found.Rules = viewer.Rules
        found.Labels = viewer.Labels
        found.Annotations = viewer.Annotations
        log.Info("Updating workspace role", "workspace", instance.Name, "name", viewer.Name)
        err = r.Update(context.TODO(), found)
        if err != nil {
            return err
        }
    }

    return nil
}

func (r *ReconcileWorkspace) createGroup(instance *tenantv1alpha1.Workspace) error {
    _, err := r.ksclient.DescribeGroup(instance.Name)

    group := &models.Group{
        Name: instance.Name,
    }

    if err != nil && kubesphere.IsNotFound(err) {
        log.Info("Creating group", "group name", instance.Name)
        _, err = r.ksclient.CreateGroup(group)
        if err != nil {
            if kubesphere.IsExist(err) {
                return nil
            }
            return err
        }
    } else if err != nil {
        return err
    }

    return nil
}

func (r *ReconcileWorkspace) deleteGroup(instance *tenantv1alpha1.Workspace) error {
    log.Info("Deleting group", "group name", instance.Name)
    if err := r.ksclient.DeleteGroup(instance.Name); err != nil {
        if kubesphere.IsNotFound(err) {
            return nil
        }
        return err
    }
    return nil
}

func (r *ReconcileWorkspace) deleteDevOpsProjects(instance *tenantv1alpha1.Workspace) error {
    var wg sync.WaitGroup

    log.Info("Delete DevOps Projects")
    for {
        projects, err := r.ksclient.ListWorkspaceDevOpsProjects(instance.Name)
        if err != nil {
            log.Error(err, "Failed to Get Workspace's DevOps Projects", "ws", instance.Name)
            return err
        }
        if projects.TotalCount == 0 {
            return nil
        }
        errChan := make(chan error, len(projects.Items))
        for _, project := range projects.Items {
            wg.Add(1)
            go func(workspace, devops string) {
                errChan <- r.ksclient.DeleteWorkspaceDevOpsProjects(workspace, devops)
                wg.Done()
            }(instance.Name, project.ProjectId)
        }
        wg.Wait()
        close(errChan)
        for err := range errChan {
            if err != nil {
                log.Error(err, "delete devops project error")
                return err
            }
        }

    }
}

func (r *ReconcileWorkspace) createWorkspaceRoleBindings(instance *tenantv1alpha1.Workspace) error {

    adminRoleBinding := &rbac.ClusterRoleBinding{}
    adminRoleBinding.Name = getWorkspaceAdminRoleBindingName(instance.Name)
    adminRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name}
    adminRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceAdminRoleName(instance.Name)}

    workspaceManager := rbac.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: instance.Spec.Manager}

    if workspaceManager.Name != "" {
        adminRoleBinding.Subjects = []rbac.Subject{workspaceManager}
    } else {
        adminRoleBinding.Subjects = []rbac.Subject{}
    }

    if err := controllerutil.SetControllerReference(instance, adminRoleBinding, r.scheme); err != nil {
        return err
    }

    foundRoleBinding := &rbac.ClusterRoleBinding{}

    err := r.Get(context.TODO(), types.NamespacedName{Name: adminRoleBinding.Name}, foundRoleBinding)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
        err = r.Create(context.TODO(), adminRoleBinding)
        if err != nil {
            return err
        }
        foundRoleBinding = adminRoleBinding
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(adminRoleBinding.RoleRef, foundRoleBinding.RoleRef) {
        log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
        err = r.Delete(context.TODO(), foundRoleBinding)
        if err != nil {
            return err
        }
        return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name)
    }

    if workspaceManager.Name != "" && !hasSubject(foundRoleBinding.Subjects, workspaceManager) {
        foundRoleBinding.Subjects = append(foundRoleBinding.Subjects, workspaceManager)
        log.Info("Updating workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
        err = r.Update(context.TODO(), foundRoleBinding)
        if err != nil {
            return err
        }
    }

    regularRoleBinding := &rbac.ClusterRoleBinding{}
    regularRoleBinding.Name = getWorkspaceRegularRoleBindingName(instance.Name)
    regularRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name}
    regularRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceRegularRoleName(instance.Name)}
    regularRoleBinding.Subjects = []rbac.Subject{}

    if err = controllerutil.SetControllerReference(instance, regularRoleBinding, r.scheme); err != nil {
        return err
    }

    err = r.Get(context.TODO(), types.NamespacedName{Name: regularRoleBinding.Name}, foundRoleBinding)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role binding", "workspace", instance.Name, "name", regularRoleBinding.Name)
        err = r.Create(context.TODO(), regularRoleBinding)
        if err != nil {
            return err
        }
        foundRoleBinding = regularRoleBinding
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(regularRoleBinding.RoleRef, foundRoleBinding.RoleRef) {
        log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", regularRoleBinding.Name)
        err = r.Delete(context.TODO(), foundRoleBinding)
        if err != nil {
            return err
        }
        return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name)
    }

    viewerRoleBinding := &rbac.ClusterRoleBinding{}
    viewerRoleBinding.Name = getWorkspaceViewerRoleBindingName(instance.Name)
    viewerRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name}
    viewerRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceViewerRoleName(instance.Name)}
    viewerRoleBinding.Subjects = []rbac.Subject{}

    if err = controllerutil.SetControllerReference(instance, viewerRoleBinding, r.scheme); err != nil {
        return err
    }

    err = r.Get(context.TODO(), types.NamespacedName{Name: viewerRoleBinding.Name}, foundRoleBinding)

    if err != nil && errors.IsNotFound(err) {
        log.Info("Creating workspace role binding", "workspace", instance.Name, "name", viewerRoleBinding.Name)
        err = r.Create(context.TODO(), viewerRoleBinding)
        if err != nil {
            return err
        }
        foundRoleBinding = viewerRoleBinding
    } else if err != nil {
        // Error reading the object - requeue the request.
        return err
    }

    // Update the found object and write the result back if there are any changes
    if !reflect.DeepEqual(viewerRoleBinding.RoleRef, foundRoleBinding.RoleRef) {
        log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", viewerRoleBinding.Name)
        err = r.Delete(context.TODO(), foundRoleBinding)
        if err != nil {
            return err
        }
        return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name)
    }

    return nil
}

func (r *ReconcileWorkspace) bindNamespaces(instance *tenantv1alpha1.Workspace) error {

    nsList := &corev1.NamespaceList{}
    options := client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: instance.Name})}
    err := r.List(context.TODO(), nsList, &options)

    if err != nil {
        log.Error(err, fmt.Sprintf("list workspace %s namespace failed", instance.Name))
        return err
    }

    for _, namespace := range nsList.Items {
        if !metav1.IsControlledBy(&namespace, instance) {
            if err := controllerutil.SetControllerReference(instance, &namespace, r.scheme); err != nil {
                return err
            }
            log.Info("Bind workspace", "namespace", namespace.Name, "workspace", instance.Name)
            err = r.Update(context.TODO(), &namespace)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

func hasSubject(subjects []rbac.Subject, user rbac.Subject) bool {
    for _, subject := range subjects {
        if reflect.DeepEqual(subject, user) {
            return true
        }
    }
    return false
}

func getWorkspaceAdminRoleName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:admin", workspaceName)
}
func getWorkspaceRegularRoleName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:regular", workspaceName)
}
func getWorkspaceViewerRoleName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:viewer", workspaceName)
}

func getWorkspaceAdminRoleBindingName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:admin", workspaceName)
}

func getWorkspaceRegularRoleBindingName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:regular", workspaceName)
}

func getWorkspaceViewerRoleBindingName(workspaceName string) string {
    return fmt.Sprintf("workspace:%s:viewer", workspaceName)
}

func getWorkspaceAdmin(workspaceName string) *rbac.ClusterRole {
    admin := &rbac.ClusterRole{}
    admin.Name = getWorkspaceAdminRoleName(workspaceName)
    admin.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName}
    admin.Annotations = map[string]string{constants.DisplayNameAnnotationKey: constants.WorkspaceAdmin, constants.DescriptionAnnotationKey: workspaceAdminDescription, constants.CreatorAnnotationKey: constants.System}
    admin.Rules = []rbac.PolicyRule{
        {
            Verbs:         []string{"*"},
            APIGroups:     []string{"*"},
            ResourceNames: []string{workspaceName},
            Resources:     []string{"workspaces", "workspaces/*"},
        },
        {
            Verbs:     []string{"watch"},
            APIGroups: []string{""},
            Resources: []string{"namespaces"},
        },
        {
            Verbs:     []string{"list"},
            APIGroups: []string{"iam.kubesphere.io"},
            Resources: []string{"users"},
        },
        {
            Verbs:     []string{"get", "list"},
            APIGroups: []string{"openpitrix.io"},
            Resources: []string{"categories"},
        },
        {
            Verbs:     []string{"*"},
            APIGroups: []string{"openpitrix.io"},
            Resources: []string{"applications", "apps", "apps/versions", "apps/events", "apps/action", "apps/audits", "repos", "repos/action", "attachments"},
        },
    }

    return admin
}

func getWorkspaceRegular(workspaceName string) *rbac.ClusterRole {
    regular := &rbac.ClusterRole{}
    regular.Name = getWorkspaceRegularRoleName(workspaceName)
    regular.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName}
    regular.Annotations = map[string]string{constants.DisplayNameAnnotationKey: constants.WorkspaceRegular, constants.DescriptionAnnotationKey: workspaceRegularDescription, constants.CreatorAnnotationKey: constants.System}
    regular.Rules = []rbac.PolicyRule{
        {
            Verbs:         []string{"get"},
            APIGroups:     []string{"*"},
            Resources:     []string{"workspaces"},
            ResourceNames: []string{workspaceName},
        }, {
            Verbs:         []string{"create"},
            APIGroups:     []string{"tenant.kubesphere.io"},
            Resources:     []string{"workspaces/namespaces", "workspaces/devops"},
            ResourceNames: []string{workspaceName},
        },
        {
            Verbs:         []string{"get"},
            APIGroups:     []string{"iam.kubesphere.io"},
            ResourceNames: []string{workspaceName},
            Resources:     []string{"workspaces/members"},
        },
        {
            Verbs:     []string{"get", "list"},
            APIGroups: []string{"openpitrix.io"},
            Resources: []string{"apps/events", "apps/action", "apps/audits", "categories"},
        },
        {
            Verbs:     []string{"*"},
            APIGroups: []string{"openpitrix.io"},
            Resources: []string{"applications", "apps", "apps/versions", "repos", "repos/action", "attachments"},
        },
    }

    return regular
}

func getWorkspaceViewer(workspaceName string) *rbac.ClusterRole {
    viewer := &rbac.ClusterRole{}
    viewer.Name = getWorkspaceViewerRoleName(workspaceName)
    viewer.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName}
    viewer.Annotations = map[string]string{constants.DisplayNameAnnotationKey: constants.WorkspaceViewer, constants.DescriptionAnnotationKey: workspaceViewerDescription, constants.CreatorAnnotationKey: constants.System}
    viewer.Rules = []rbac.PolicyRule{
        {
            Verbs:         []string{"get", "list"},
            APIGroups:     []string{"*"},
            ResourceNames: []string{workspaceName},
            Resources:     []string{"workspaces", "workspaces/*"},
        },
        {
            Verbs:     []string{"watch"},
            APIGroups: []string{""},
            Resources: []string{"namespaces"},
        },
        {
            Verbs:     []string{"get", "list"},
            APIGroups: []string{"openpitrix.io"},
            Resources: []string{"applications", "apps", "apps/events", "apps/action", "apps/audits", "apps/versions", "repos", "categories", "attachments"},
        },
    }
    return viewer
}
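As a small illustration (not part of the commit), the role and role-binding name helpers above all follow the same "workspace:<name>:<role>" convention, which a short unit test could document; the test name below is hypothetical and assumes placement in a _test.go file of the workspace package.

// Sketch only: documents the naming convention produced by the helpers above.
func TestWorkspaceRoleNames(t *testing.T) {
    if got := getWorkspaceAdminRoleName("demo"); got != "workspace:demo:admin" {
        t.Errorf("unexpected admin role name: %s", got)
    }
    if got := getWorkspaceRegularRoleName("demo"); got != "workspace:demo:regular" {
        t.Errorf("unexpected regular role name: %s", got)
    }
    if got := getWorkspaceViewerRoleName("demo"); got != "workspace:demo:viewer" {
        t.Errorf("unexpected viewer role name: %s", got)
    }
}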