Merge pull request #3179 from wansir/workspace
refactor workspace controller
This commit is contained in:
@@ -49,21 +49,20 @@ import (
|
||||
const (
|
||||
successSynced = "Synced"
|
||||
messageResourceSynced = "Group synced successfully"
|
||||
controllerName = "groupbinding-controller"
|
||||
controllerName = "group-controller"
|
||||
finalizer = "finalizers.kubesphere.io/groups"
|
||||
)
|
||||
|
||||
type Controller struct {
|
||||
controller.BaseController
|
||||
scheme *runtime.Scheme
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
groupInformer iamv1alpha2informers.GroupInformer
|
||||
groupLister iamv1alpha1listers.GroupLister
|
||||
recorder record.EventRecorder
|
||||
federatedGroupInformer fedv1beta1informers.FederatedGroupInformer
|
||||
federatedGroupLister fedv1beta1lister.FederatedGroupLister
|
||||
multiClusterEnabled bool
|
||||
scheme *runtime.Scheme
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
groupInformer iamv1alpha2informers.GroupInformer
|
||||
groupLister iamv1alpha1listers.GroupLister
|
||||
recorder record.EventRecorder
|
||||
federatedGroupLister fedv1beta1lister.FederatedGroupLister
|
||||
multiClusterEnabled bool
|
||||
}
|
||||
|
||||
// NewController creates Group Controller instance
|
||||
@@ -81,18 +80,17 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface
|
||||
Synced: []cache.InformerSynced{groupInformer.Informer().HasSynced},
|
||||
Name: controllerName,
|
||||
},
|
||||
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}),
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
groupInformer: groupInformer,
|
||||
groupLister: groupInformer.Lister(),
|
||||
federatedGroupInformer: federatedGroupInformer,
|
||||
federatedGroupLister: federatedGroupInformer.Lister(),
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}),
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
groupInformer: groupInformer,
|
||||
groupLister: groupInformer.Lister(),
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
}
|
||||
|
||||
if ctl.multiClusterEnabled {
|
||||
ctl.Synced = append(ctl.Synced, ctl.federatedGroupInformer.Informer().HasSynced)
|
||||
ctl.federatedGroupLister = federatedGroupInformer.Lister()
|
||||
ctl.Synced = append(ctl.Synced, federatedGroupInformer.Informer().HasSynced)
|
||||
}
|
||||
|
||||
ctl.Handler = ctl.reconcile
|
||||
|
||||
@@ -56,15 +56,13 @@ const (
|
||||
|
||||
type Controller struct {
|
||||
controller.BaseController
|
||||
scheme *runtime.Scheme
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
groupBindingInformer iamv1alpha2informers.GroupBindingInformer
|
||||
groupBindingLister iamv1alpha2listers.GroupBindingLister
|
||||
recorder record.EventRecorder
|
||||
federatedGroupBindingInformer fedv1beta1informers.FederatedGroupBindingInformer
|
||||
federatedGroupBindingLister fedv1beta1lister.FederatedGroupBindingLister
|
||||
multiClusterEnabled bool
|
||||
scheme *runtime.Scheme
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
groupBindingLister iamv1alpha2listers.GroupBindingLister
|
||||
recorder record.EventRecorder
|
||||
federatedGroupBindingLister fedv1beta1lister.FederatedGroupBindingLister
|
||||
multiClusterEnabled bool
|
||||
}
|
||||
|
||||
// NewController creates GroupBinding Controller instance
|
||||
@@ -82,18 +80,16 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface
|
||||
Synced: []cache.InformerSynced{groupBindingInformer.Informer().HasSynced},
|
||||
Name: controllerName,
|
||||
},
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
groupBindingInformer: groupBindingInformer,
|
||||
groupBindingLister: groupBindingInformer.Lister(),
|
||||
federatedGroupBindingInformer: federatedGroupBindingInformer,
|
||||
federatedGroupBindingLister: federatedGroupBindingInformer.Lister(),
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
recorder: recorder,
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
groupBindingLister: groupBindingInformer.Lister(),
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
recorder: recorder,
|
||||
}
|
||||
ctl.Handler = ctl.reconcile
|
||||
if ctl.multiClusterEnabled {
|
||||
ctl.Synced = append(ctl.Synced, ctl.federatedGroupBindingInformer.Informer().HasSynced)
|
||||
ctl.federatedGroupBindingLister = federatedGroupBindingInformer.Lister()
|
||||
ctl.Synced = append(ctl.Synced, federatedGroupBindingInformer.Informer().HasSynced)
|
||||
}
|
||||
klog.Info("Setting up event handlers")
|
||||
groupBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
|
||||
@@ -20,240 +20,207 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/go-logr/logr"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"reflect"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// Add creates a new Namespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
|
||||
// and Start it when the Manager is Started.
|
||||
func Add(mgr manager.Manager) error {
|
||||
return add(mgr, newReconciler(mgr))
|
||||
}
|
||||
const (
|
||||
controllerName = "namespace-controller"
|
||||
)
|
||||
|
||||
// newReconciler returns a new reconcile.Reconciler
|
||||
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
|
||||
return &ReconcileNamespace{
|
||||
Client: mgr.GetClient(),
|
||||
scheme: mgr.GetScheme(),
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
|
||||
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
// Create a new controller
|
||||
c, err := controller.New("namespace-controller", mgr, controller.Options{Reconciler: r})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes to Namespace
|
||||
err = c.Watch(&source.Kind{Type: &corev1.Namespace{}}, &handler.EnqueueRequestForObject{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ reconcile.Reconciler = &ReconcileNamespace{}
|
||||
|
||||
// ReconcileNamespace reconciles a Namespace object
|
||||
type ReconcileNamespace struct {
|
||||
// Reconciler reconciles a Namespace object
|
||||
type Reconciler struct {
|
||||
client.Client
|
||||
scheme *runtime.Scheme
|
||||
Logger logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
MaxConcurrentReconciles int
|
||||
}
|
||||
|
||||
// Reconcile reads that state of the cluster for a Namespace object and makes changes based on the state read
|
||||
// and what is in the Namespace.Spec
|
||||
// +kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces/status,verbs=get;update;patch
|
||||
func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
// Fetch the Namespace instance
|
||||
instance := &corev1.Namespace{}
|
||||
err := r.Get(context.TODO(), request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
// Object not found, return. Created objects are automatically garbage collected.
|
||||
// For additional cleanup logic use finalizers.
|
||||
// The object is being deleted
|
||||
// our finalizer is present, so lets handle our external dependency
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
return reconcile.Result{}, err
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if r.Client == nil {
|
||||
r.Client = mgr.GetClient()
|
||||
}
|
||||
if r.Logger == nil {
|
||||
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
|
||||
}
|
||||
if r.Recorder == nil {
|
||||
r.Recorder = mgr.GetEventRecorderFor(controllerName)
|
||||
}
|
||||
if r.MaxConcurrentReconciles <= 0 {
|
||||
r.MaxConcurrentReconciles = 1
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named(controllerName).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
|
||||
}).
|
||||
For(&corev1.Namespace{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=rolebases,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
|
||||
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := r.Logger.WithValues("namespace", req.NamespacedName)
|
||||
rootCtx := context.Background()
|
||||
namespace := &corev1.Namespace{}
|
||||
if err := r.Get(rootCtx, req.NamespacedName, namespace); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
// name of your custom finalizer
|
||||
finalizer := "finalizers.kubesphere.io/namespaces"
|
||||
|
||||
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if namespace.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
// The object is not being deleted, so if it does not have our finalizer,
|
||||
// then lets add the finalizer and update the object.
|
||||
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) {
|
||||
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizer)
|
||||
if instance.Labels == nil {
|
||||
instance.Labels = make(map[string]string)
|
||||
if !sliceutil.HasString(namespace.ObjectMeta.Finalizers, finalizer) {
|
||||
// create only once, ignore already exists error
|
||||
if err := r.initCreatorRoleBinding(rootCtx, logger, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
instance.Labels[constants.NamespaceLabelKey] = instance.Name
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, finalizer)
|
||||
if namespace.Labels == nil {
|
||||
namespace.Labels = make(map[string]string)
|
||||
}
|
||||
// used for NetworkPolicyPeer.NamespaceSelector
|
||||
namespace.Labels[constants.NamespaceLabelKey] = namespace.Name
|
||||
if err := r.Update(rootCtx, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) {
|
||||
if err = r.deleteRouter(instance.Name); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if sliceutil.HasString(namespace.ObjectMeta.Finalizers, finalizer) {
|
||||
if err := r.deleteRouter(rootCtx, logger, namespace.Name); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// remove our finalizer from the list and update it.
|
||||
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
|
||||
namespace.ObjectMeta.Finalizers = sliceutil.RemoveString(namespace.ObjectMeta.Finalizers, func(item string) bool {
|
||||
return item == finalizer
|
||||
})
|
||||
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if err := r.Update(rootCtx, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Our finalizer has finished, so the reconciler can do nothing.
|
||||
return reconcile.Result{}, nil
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// initialize subresource if created by kubesphere
|
||||
if workspace := instance.Labels[constants.WorkspaceLabelKey]; workspace != "" {
|
||||
if err = r.bindWorkspace(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if workspace := namespace.Labels[tenantv1alpha1.WorkspaceLabel]; workspace != "" {
|
||||
if err := r.bindWorkspace(rootCtx, logger, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if err = r.initRoles(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
if err = r.initCreatorRoleBinding(instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if err := r.initRoles(rootCtx, logger, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
} else {
|
||||
r.unbindWorkspace(instance)
|
||||
if err := r.unbindWorkspace(rootCtx, logger, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
r.Recorder.Event(namespace, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ReconcileNamespace) bindWorkspace(namespace *corev1.Namespace) error {
|
||||
workspaceName := namespace.Labels[constants.WorkspaceLabelKey]
|
||||
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
|
||||
workspace := &tenantv1alpha1.Workspace{}
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace); err != nil {
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: namespace.Labels[constants.WorkspaceLabelKey]}, workspace); err != nil {
|
||||
// remove existed owner reference if workspace not found
|
||||
if errors.IsNotFound(err) && k8sutil.IsControlledBy(namespace.OwnerReferences, tenantv1alpha1.ResourceKindWorkspace, "") {
|
||||
return r.unbindWorkspace(ctx, logger, namespace)
|
||||
}
|
||||
// skip if workspace not found
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Warning(err)
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// owner reference not match workspace label
|
||||
if !metav1.IsControlledBy(namespace, workspace) {
|
||||
workspace.OwnerReferences = removeWorkspaceOwnerReference(workspace.OwnerReferences)
|
||||
if err := controllerutil.SetControllerReference(workspace, namespace, r.scheme); err != nil {
|
||||
klog.Error(err)
|
||||
namespace := namespace.DeepCopy()
|
||||
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
|
||||
if err := controllerutil.SetControllerReference(workspace, namespace, scheme.Scheme); err != nil {
|
||||
logger.Error(err, "set controller reference failed")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.Update(context.TODO(), namespace); err != nil {
|
||||
klog.Error(err)
|
||||
logger.V(4).Info("update namespace owner reference", "workspace", workspace.Name)
|
||||
if err := r.Update(ctx, namespace); err != nil {
|
||||
logger.Error(err, "update namespace failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReconcileNamespace) unbindWorkspace(namespace *corev1.Namespace) error {
|
||||
|
||||
func (r *Reconciler) unbindWorkspace(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
|
||||
if k8sutil.IsControlledBy(namespace.OwnerReferences, tenantv1alpha1.ResourceKindWorkspace, "") {
|
||||
namespace := namespace.DeepCopy()
|
||||
namespace.OwnerReferences = removeWorkspaceOwnerReference(namespace.OwnerReferences)
|
||||
if err := r.Update(context.TODO(), namespace); err != nil {
|
||||
klog.Error(err)
|
||||
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
|
||||
logger.V(4).Info("remove owner reference", "workspace", namespace.Labels[constants.WorkspaceLabelKey])
|
||||
if err := r.Update(ctx, namespace); err != nil {
|
||||
logger.Error(err, "update owner reference failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove workspace kind owner reference of the namespace
|
||||
func removeWorkspaceOwnerReference(ownerReferences []metav1.OwnerReference) []metav1.OwnerReference {
|
||||
tmp := make([]metav1.OwnerReference, 0)
|
||||
for _, owner := range ownerReferences {
|
||||
if owner.Kind != tenantv1alpha1.ResourceKindWorkspace {
|
||||
tmp = append(tmp, owner)
|
||||
}
|
||||
}
|
||||
return tmp
|
||||
}
|
||||
|
||||
func (r *ReconcileNamespace) deleteRouter(namespace string) error {
|
||||
func (r *Reconciler) deleteRouter(ctx context.Context, logger logr.Logger, namespace string) error {
|
||||
routerName := constants.IngressControllerPrefix + namespace
|
||||
// delete service first
|
||||
found := corev1.Service{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Namespace: constants.IngressControllerNamespace, Name: routerName}, &found)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
err = r.Delete(context.TODO(), &found)
|
||||
// delete service first
|
||||
service := corev1.Service{}
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: constants.IngressControllerNamespace, Name: routerName}, &service)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
logger.V(4).Info("delete router service", "namespace", service.Namespace, "service", service.Name)
|
||||
err = r.Delete(ctx, &service)
|
||||
if err != nil {
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// delete deployment
|
||||
deploy := appsv1.Deployment{}
|
||||
err = r.Get(context.TODO(), types.NamespacedName{Namespace: constants.IngressControllerNamespace, Name: routerName}, &deploy)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: constants.IngressControllerNamespace, Name: routerName}, &deploy)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
logger.Error(err, "delete router deployment failed")
|
||||
return err
|
||||
}
|
||||
|
||||
err = r.Delete(context.TODO(), &deploy)
|
||||
logger.V(4).Info("delete router deployment", "namespace", deploy.Namespace, "deployment", deploy.Name)
|
||||
err = r.Delete(ctx, &deploy)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReconcileNamespace) initRoles(namespace *corev1.Namespace) error {
|
||||
var roleBases iamv1alpha2.RoleBaseList
|
||||
|
||||
func (r *Reconciler) initRoles(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
|
||||
var templates iamv1alpha2.RoleBaseList
|
||||
var labelKey string
|
||||
// filtering initial roles by label
|
||||
if namespace.Labels[constants.DevOpsProjectLabelKey] != "" {
|
||||
@@ -263,29 +230,26 @@ func (r *ReconcileNamespace) initRoles(namespace *corev1.Namespace) error {
|
||||
// scope.kubesphere.io/namespace: ""
|
||||
labelKey = fmt.Sprintf(iamv1alpha2.ScopeLabelFormat, iamv1alpha2.ScopeNamespace)
|
||||
}
|
||||
err := r.List(context.Background(), &roleBases, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(labels.Set{labelKey: ""})})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
|
||||
if err := r.List(ctx, &templates, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(labels.Set{labelKey: ""})}); err != nil {
|
||||
logger.Error(err, "list role bases failed")
|
||||
return err
|
||||
}
|
||||
|
||||
for _, roleBase := range roleBases.Items {
|
||||
for _, template := range templates.Items {
|
||||
var role rbacv1.Role
|
||||
if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil && role.Kind == iamv1alpha2.ResourceKindRole {
|
||||
if err := yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(template.Role.Raw), 1024).Decode(&role); err == nil && role.Kind == iamv1alpha2.ResourceKindRole {
|
||||
var old rbacv1.Role
|
||||
err := r.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, &old)
|
||||
if err != nil {
|
||||
if err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, &old); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
role.Namespace = namespace.Name
|
||||
err = r.Client.Create(context.Background(), &role)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
logger.V(4).Info("init builtin role", "role", role.Name)
|
||||
if err := r.Client.Create(ctx, &role); err != nil {
|
||||
logger.Error(err, "create role failed")
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(role.Labels, old.Labels) ||
|
||||
!reflect.DeepEqual(role.Annotations, old.Annotations) ||
|
||||
!reflect.DeepEqual(role.Rules, old.Rules) {
|
||||
@@ -294,36 +258,46 @@ func (r *ReconcileNamespace) initRoles(namespace *corev1.Namespace) error {
|
||||
old.Annotations = role.Annotations
|
||||
old.Rules = role.Rules
|
||||
|
||||
if err := r.Update(context.Background(), &old); err != nil {
|
||||
klog.Error(err)
|
||||
logger.V(4).Info("update builtin role", "role", role.Name)
|
||||
if err := r.Update(ctx, &old); err != nil {
|
||||
logger.Error(err, "update role failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if err != nil {
|
||||
logger.Error(fmt.Errorf("invalid role base found"), "init roles failed", "name", template.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReconcileNamespace) initCreatorRoleBinding(namespace *corev1.Namespace) error {
|
||||
func (r *Reconciler) initCreatorRoleBinding(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace) error {
|
||||
creator := namespace.Annotations[constants.CreatorAnnotationKey]
|
||||
if creator == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var user iamv1alpha2.User
|
||||
if err := r.Get(context.Background(), types.NamespacedName{Name: creator}, &user); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: creator}, &user); err != nil {
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
creatorRoleBinding := newCreatorRoleBinding(creator, namespace.Name)
|
||||
logger.V(4).Info("init creator role binding", "creator", user.Name)
|
||||
if err := r.Client.Create(ctx, creatorRoleBinding); err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
logger.Error(err, "create role binding failed")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
creatorRoleBinding := &rbacv1.RoleBinding{
|
||||
func newCreatorRoleBinding(creator string, namespace string) *rbacv1.RoleBinding {
|
||||
return &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", creator, iamv1alpha2.NamespaceAdmin),
|
||||
Labels: map[string]string{iamv1alpha2.UserReferenceLabel: creator},
|
||||
Namespace: namespace.Name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: rbacv1.GroupName,
|
||||
@@ -338,14 +312,4 @@ func (r *ReconcileNamespace) initCreatorRoleBinding(namespace *corev1.Namespace)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := r.Client.Create(context.Background(), creatorRoleBinding); err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,59 +17,84 @@ limitations under the License.
|
||||
package namespace
|
||||
|
||||
import (
|
||||
stdlog "log"
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
var cfg *rest.Config
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
t := &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
}
|
||||
apis.AddToScheme(scheme.Scheme)
|
||||
var k8sClient client.Client
|
||||
var k8sManager ctrl.Manager
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
var err error
|
||||
if cfg, err = t.Start(); err != nil {
|
||||
stdlog.Fatal(err)
|
||||
}
|
||||
|
||||
code := m.Run()
|
||||
t.Stop()
|
||||
os.Exit(code)
|
||||
func TestNamespaceController(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"Namespace Controller Test Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
}
|
||||
|
||||
// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and
|
||||
// writes the request to requests after Reconcile is finished.
|
||||
func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) {
|
||||
requests := make(chan reconcile.Request)
|
||||
fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
|
||||
result, err := inner.Reconcile(req)
|
||||
requests <- req
|
||||
return result, err
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme.Scheme,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
return fn, requests
|
||||
}
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&Reconciler{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// StartTestManager adds recFn
|
||||
func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) {
|
||||
stop := make(chan struct{})
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred())
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
return stop, wg
|
||||
}
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
gexec.KillAndWait(5 * time.Second)
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -15,3 +15,88 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"context"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ = Describe("Namespace", func() {
|
||||
|
||||
const timeout = time.Second * 30
|
||||
const interval = time.Second * 1
|
||||
|
||||
workspace := &tenantv1alpha1.Workspace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-workspace",
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
// Create workspace
|
||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||
})
|
||||
|
||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
||||
// your API definition.
|
||||
// Avoid adding tests for vanilla CRUD operations because they would
|
||||
// test Kubernetes API server, which isn't the goal here.
|
||||
Context("Namespace Controller", func() {
|
||||
It("Should create successfully", func() {
|
||||
namespace := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-namespace",
|
||||
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
|
||||
},
|
||||
}
|
||||
|
||||
// Create namespace
|
||||
Expect(k8sClient.Create(context.Background(), namespace)).Should(Succeed())
|
||||
|
||||
By("Expecting to create namespace successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: namespace.Name}, namespace)
|
||||
return !namespace.CreationTimestamp.IsZero()
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
By("Expecting to set owner reference successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: namespace.Name}, namespace)
|
||||
return len(namespace.OwnerReferences) > 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, workspace)).Should(Succeed())
|
||||
|
||||
controlled := true
|
||||
expectedOwnerReference := metav1.OwnerReference{
|
||||
Kind: workspace.Kind,
|
||||
APIVersion: workspace.APIVersion,
|
||||
UID: workspace.UID,
|
||||
Name: workspace.Name,
|
||||
Controller: &controlled,
|
||||
BlockOwnerDeletion: &controlled,
|
||||
}
|
||||
|
||||
By("Expecting to bind workspace successfully")
|
||||
Expect(namespace.OwnerReferences).To(ContainElement(expectedOwnerReference))
|
||||
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, workspace)).Should(Succeed())
|
||||
|
||||
By("Expecting to update namespace successfully")
|
||||
updated := namespace.DeepCopy()
|
||||
updated.Labels[constants.WorkspaceLabelKey] = "workspace-not-exist"
|
||||
Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())
|
||||
|
||||
By("Expecting to unbind workspace successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: namespace.Name}, namespace)
|
||||
return len(namespace.OwnerReferences) == 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -27,6 +27,13 @@ import (
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
|
||||
SuccessSynced = "Synced"
|
||||
// is synced successfully
|
||||
MessageResourceSynced = "Synced successfully"
|
||||
)
|
||||
|
||||
// BaseController provides a Controller template for watching a primary resources that defined as CRD.
|
||||
type BaseController struct {
|
||||
// Workers will wait informer caches to be synced
|
||||
|
||||
@@ -18,104 +18,134 @@ package workspace
|
||||
|
||||
import (
|
||||
"context"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
// Add creates a new Workspace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
|
||||
// and Start it when the Manager is Started.
|
||||
func Add(mgr manager.Manager) error {
|
||||
return add(mgr, newReconciler(mgr))
|
||||
}
|
||||
const (
|
||||
controllerName = "workspace-controller"
|
||||
)
|
||||
|
||||
// newReconciler returns a new reconcile.Reconciler
|
||||
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
|
||||
return &ReconcileWorkspace{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
|
||||
recorder: mgr.GetEventRecorderFor("workspace-controller")}
|
||||
}
|
||||
|
||||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
|
||||
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
// Create a new controller
|
||||
c, err := controller.New("workspace-controller", mgr, controller.Options{Reconciler: r})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes to Workspace
|
||||
err = c.Watch(&source.Kind{Type: &tenantv1alpha1.Workspace{}}, &handler.EnqueueRequestForObject{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ reconcile.Reconciler = &ReconcileWorkspace{}
|
||||
|
||||
// ReconcileWorkspace reconciles a Workspace object
|
||||
type ReconcileWorkspace struct {
|
||||
// Reconciler reconciles a Workspace object
|
||||
type Reconciler struct {
|
||||
client.Client
|
||||
scheme *runtime.Scheme
|
||||
recorder record.EventRecorder
|
||||
Logger logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
MaxConcurrentReconciles int
|
||||
}
|
||||
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if r.Client == nil {
|
||||
r.Client = mgr.GetClient()
|
||||
}
|
||||
if r.Logger == nil {
|
||||
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
|
||||
}
|
||||
if r.Recorder == nil {
|
||||
r.Recorder = mgr.GetEventRecorderFor(controllerName)
|
||||
}
|
||||
if r.MaxConcurrentReconciles <= 0 {
|
||||
r.MaxConcurrentReconciles = 1
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named(controllerName).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
|
||||
}).
|
||||
For(&tenantv1alpha1.Workspace{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
// Reconcile reads that state of the cluster for a Workspace object and makes changes based on the state read
|
||||
// and what is in the Workspace.Spec
|
||||
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces/status,verbs=get;update;patch
|
||||
func (r *ReconcileWorkspace) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
// Fetch the Workspace instance
|
||||
instance := &tenantv1alpha1.Workspace{}
|
||||
err := r.Get(context.TODO(), request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
// Object not found, return. Created objects are automatically garbage collected.
|
||||
// For additional cleanup logic use finalizers.
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
return reconcile.Result{}, err
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=users,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=rolebases,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspaceroles,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
|
||||
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := r.Logger.WithValues("workspace", req.NamespacedName)
|
||||
rootCtx := context.Background()
|
||||
workspace := &tenantv1alpha1.Workspace{}
|
||||
if err := r.Get(rootCtx, req.NamespacedName, workspace); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// controlled kubefed-controller-manager
|
||||
if workspace.Labels[constants.KubefedManagedLabel] == "true" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// name of your custom finalizer
|
||||
finalizer := "finalizers.tenant.kubesphere.io"
|
||||
|
||||
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if workspace.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
// The object is not being deleted, so if it does not have our finalizer,
|
||||
// then lets add the finalizer and update the object.
|
||||
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) {
|
||||
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizer)
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
if !sliceutil.HasString(workspace.ObjectMeta.Finalizers, finalizer) {
|
||||
workspace.ObjectMeta.Finalizers = append(workspace.ObjectMeta.Finalizers, finalizer)
|
||||
if err := r.Update(rootCtx, workspace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) {
|
||||
// our finalizer is present, so lets handle our external dependency
|
||||
|
||||
if sliceutil.HasString(workspace.ObjectMeta.Finalizers, finalizer) {
|
||||
// remove our finalizer from the list and update it.
|
||||
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
|
||||
workspace.ObjectMeta.Finalizers = sliceutil.RemoveString(workspace.ObjectMeta.Finalizers, func(item string) bool {
|
||||
return item == finalizer
|
||||
})
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
logger.V(4).Info("update workspace")
|
||||
if err := r.Update(rootCtx, workspace); err != nil {
|
||||
logger.Error(err, "update workspace failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Our finalizer has finished, so the reconciler can do nothing.
|
||||
return reconcile.Result{}, nil
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
var namespaces corev1.NamespaceList
|
||||
if err := r.List(rootCtx, &namespaces, client.MatchingLabels{tenantv1alpha1.WorkspaceLabel: req.Name}); err != nil {
|
||||
logger.Error(err, "list namespaces failed")
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
for _, namespace := range namespaces.Items {
|
||||
if err := r.bindWorkspace(rootCtx, logger, &namespace, workspace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.Recorder.Event(workspace, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, namespace *corev1.Namespace, workspace *tenantv1alpha1.Workspace) error {
|
||||
// owner reference not match workspace label
|
||||
if !metav1.IsControlledBy(namespace, workspace) {
|
||||
namespace := namespace.DeepCopy()
|
||||
namespace.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(namespace.OwnerReferences)
|
||||
if err := controllerutil.SetControllerReference(workspace, namespace, scheme.Scheme); err != nil {
|
||||
logger.Error(err, "set controller reference failed")
|
||||
return err
|
||||
}
|
||||
logger.V(4).Info("update namespace owner reference", "workspace", workspace.Name)
|
||||
if err := r.Update(ctx, namespace); err != nil {
|
||||
logger.Error(err, "update namespace failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,59 +17,84 @@ limitations under the License.
|
||||
package workspace
|
||||
|
||||
import (
|
||||
stdlog "log"
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
var cfg *rest.Config
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
t := &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
}
|
||||
apis.AddToScheme(scheme.Scheme)
|
||||
var k8sClient client.Client
|
||||
var k8sManager ctrl.Manager
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
var err error
|
||||
if cfg, err = t.Start(); err != nil {
|
||||
stdlog.Fatal(err)
|
||||
}
|
||||
|
||||
code := m.Run()
|
||||
t.Stop()
|
||||
os.Exit(code)
|
||||
func TestWorkspaceController(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"Workspace Controller Test Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
}
|
||||
|
||||
// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and
|
||||
// writes the request to requests after Reconcile is finished.
|
||||
func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) {
|
||||
requests := make(chan reconcile.Request)
|
||||
fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
|
||||
result, err := inner.Reconcile(req)
|
||||
requests <- req
|
||||
return result, err
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme.Scheme,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
return fn, requests
|
||||
}
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&Reconciler{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// StartTestManager adds recFn
|
||||
func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) {
|
||||
stop := make(chan struct{})
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred())
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
return stop, wg
|
||||
}
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
gexec.KillAndWait(5 * time.Second)
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -15,3 +15,71 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
package workspace
|
||||
|
||||
import (
|
||||
"context"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ = Describe("Workspace", func() {
|
||||
|
||||
const timeout = time.Second * 30
|
||||
const interval = time.Second * 1
|
||||
|
||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
||||
// your API definition.
|
||||
// Avoid adding tests for vanilla CRUD operations because they would
|
||||
// test Kubernetes API server, which isn't the goal here.
|
||||
Context("Workspace Controller", func() {
|
||||
It("Should create successfully", func() {
|
||||
|
||||
workspace := &tenantv1alpha1.Workspace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "workspace-test",
|
||||
},
|
||||
}
|
||||
|
||||
// Create
|
||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||
|
||||
By("Expecting to create workspace successfully")
|
||||
Eventually(func() bool {
|
||||
f := &tenantv1alpha1.Workspace{}
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, f)
|
||||
return len(f.Finalizers) > 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
// Update
|
||||
updated := &tenantv1alpha1.Workspace{}
|
||||
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, updated)).Should(Succeed())
|
||||
updated.Spec.Manager = "admin"
|
||||
Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())
|
||||
|
||||
// List workspace role bindings
|
||||
By("Expecting to update workspace successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, workspace)
|
||||
return workspace.Spec.Manager == "admin"
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
// Delete
|
||||
By("Expecting to delete workspace successfully")
|
||||
Eventually(func() error {
|
||||
f := &tenantv1alpha1.Workspace{}
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, f)
|
||||
return k8sClient.Delete(context.Background(), f)
|
||||
}, timeout, interval).Should(Succeed())
|
||||
|
||||
By("Expecting to delete workspace finish")
|
||||
Eventually(func() error {
|
||||
f := &tenantv1alpha1.Workspace{}
|
||||
return k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, f)
|
||||
}, timeout, interval).ShouldNot(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,412 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerole
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
|
||||
tenantv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha2"
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
tenantv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"reflect"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
|
||||
successSynced = "Synced"
|
||||
// is synced successfully
|
||||
messageResourceSynced = "WorkspaceRole synced successfully"
|
||||
controllerName = "workspacerole-controller"
|
||||
)
|
||||
|
||||
type Controller struct {
|
||||
scheme *runtime.Scheme
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer
|
||||
workspaceRoleLister iamv1alpha2listers.WorkspaceRoleLister
|
||||
workspaceRoleSynced cache.InformerSynced
|
||||
workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer
|
||||
workspaceTemplateLister tenantv1alpha2listers.WorkspaceTemplateLister
|
||||
workspaceTemplateSynced cache.InformerSynced
|
||||
fedWorkspaceRoleCache cache.Store
|
||||
fedWorkspaceRoleCacheController cache.Controller
|
||||
multiClusterEnabled bool
|
||||
// workqueue is a rate limited work queue. This is used to queue work to be
|
||||
// processed instead of performing it as soon as a change happens. This
|
||||
// means we can ensure we only process a fixed amount of resources at a
|
||||
// time, and makes it easy to ensure we are never processing the same item
|
||||
// simultaneously in two different workers.
|
||||
workqueue workqueue.RateLimitingInterface
|
||||
// recorder is an event recorder for recording Event resources to the
|
||||
// Kubernetes API.
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer,
|
||||
fedWorkspaceRoleCache cache.Store, fedWorkspaceRoleCacheController cache.Controller, workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer, multiClusterEnabled bool) *Controller {
|
||||
// Create event broadcaster
|
||||
// Add sample-controller types to the default Kubernetes Scheme so Events can be
|
||||
// logged for sample-controller types.
|
||||
|
||||
klog.V(4).Info("Creating event broadcaster")
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
|
||||
ctl := &Controller{
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
workspaceRoleInformer: workspaceRoleInformer,
|
||||
workspaceRoleLister: workspaceRoleInformer.Lister(),
|
||||
workspaceRoleSynced: workspaceRoleInformer.Informer().HasSynced,
|
||||
fedWorkspaceRoleCache: fedWorkspaceRoleCache,
|
||||
fedWorkspaceRoleCacheController: fedWorkspaceRoleCacheController,
|
||||
workspaceTemplateInformer: workspaceTemplateInformer,
|
||||
workspaceTemplateLister: workspaceTemplateInformer.Lister(),
|
||||
workspaceTemplateSynced: workspaceTemplateInformer.Informer().HasSynced,
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRole"),
|
||||
recorder: recorder,
|
||||
}
|
||||
klog.Info("Setting up event handlers")
|
||||
workspaceRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: ctl.enqueueWorkspaceRole,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
ctl.enqueueWorkspaceRole(new)
|
||||
},
|
||||
DeleteFunc: ctl.enqueueWorkspaceRole,
|
||||
})
|
||||
return ctl
|
||||
}
|
||||
|
||||
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer c.workqueue.ShutDown()
|
||||
|
||||
// Start the informer factories to begin populating the informer caches
|
||||
klog.Info("Starting WorkspaceRole controller")
|
||||
|
||||
// Wait for the caches to be synced before starting workers
|
||||
klog.Info("Waiting for informer caches to sync")
|
||||
|
||||
synced := make([]cache.InformerSynced, 0)
|
||||
synced = append(synced, c.workspaceRoleSynced, c.workspaceTemplateSynced)
|
||||
if c.multiClusterEnabled {
|
||||
synced = append(synced, c.fedWorkspaceRoleCacheController.HasSynced)
|
||||
}
|
||||
|
||||
if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
|
||||
return fmt.Errorf("failed to wait for caches to sync")
|
||||
}
|
||||
|
||||
klog.Info("Starting workers")
|
||||
// Launch two workers to process Foo resources
|
||||
for i := 0; i < threadiness; i++ {
|
||||
go wait.Until(c.runWorker, time.Second, stopCh)
|
||||
}
|
||||
|
||||
klog.Info("Started workers")
|
||||
<-stopCh
|
||||
klog.Info("Shutting down workers")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) enqueueWorkspaceRole(obj interface{}) {
|
||||
var key string
|
||||
var err error
|
||||
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
c.workqueue.Add(key)
|
||||
}
|
||||
|
||||
func (c *Controller) runWorker() {
|
||||
for c.processNextWorkItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) processNextWorkItem() bool {
|
||||
obj, shutdown := c.workqueue.Get()
|
||||
|
||||
if shutdown {
|
||||
return false
|
||||
}
|
||||
|
||||
// We wrap this block in a func so we can defer c.workqueue.Done.
|
||||
err := func(obj interface{}) error {
|
||||
// We call Done here so the workqueue knows we have finished
|
||||
// processing this item. We also must remember to call Forget if we
|
||||
// do not want this work item being re-queued. For example, we do
|
||||
// not call Forget if a transient error occurs, instead the item is
|
||||
// put back on the workqueue and attempted again after a back-off
|
||||
// period.
|
||||
defer c.workqueue.Done(obj)
|
||||
var key string
|
||||
var ok bool
|
||||
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
|
||||
if key, ok = obj.(string); !ok {
|
||||
// As the item in the workqueue is actually invalid, we call
|
||||
// Forget here else we'd go into a loop of attempting to
|
||||
// process a work item that is invalid.
|
||||
c.workqueue.Forget(obj)
|
||||
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
|
||||
return nil
|
||||
}
|
||||
// Run the reconcile, passing it the namespace/name string of the
|
||||
// Foo resource to be synced.
|
||||
if err := c.reconcile(key); err != nil {
|
||||
// Put the item back on the workqueue to handle any transient errors.
|
||||
c.workqueue.AddRateLimited(key)
|
||||
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
|
||||
}
|
||||
// Finally, if no error occurs we Forget this item so it does not
|
||||
// get queued again until another change happens.
|
||||
c.workqueue.Forget(obj)
|
||||
klog.Infof("Successfully synced %s:%s", "key", key)
|
||||
return nil
|
||||
}(obj)
|
||||
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// syncHandler compares the actual state with the desired, and attempts to
|
||||
// converge the two. It then updates the Status block of the Foo resource
|
||||
// with the current status of the resource.
|
||||
func (c *Controller) reconcile(key string) error {
|
||||
|
||||
workspaceRole, err := c.workspaceRoleLister.Get(key)
if err != nil {
// The workspace role may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workspacerole '%s' in work queue no longer exists", key))
return nil
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = c.bindWorkspace(workspaceRole); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if c.multiClusterEnabled {
|
||||
if err = c.multiClusterSync(workspaceRole); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.recorder.Event(workspaceRole, corev1.EventTypeNormal, successSynced, messageResourceSynced)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) Start(stopCh <-chan struct{}) error {
|
||||
return c.Run(4, stopCh)
|
||||
}
|
||||
|
||||
func (c *Controller) bindWorkspace(workspaceRole *iamv1alpha2.WorkspaceRole) error {
|
||||
|
||||
workspaceName := workspaceRole.Labels[constants.WorkspaceLabelKey]
|
||||
|
||||
if workspaceName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
workspace, err := c.workspaceTemplateLister.Get(workspaceName)
|
||||
|
||||
if err != nil {
|
||||
// skip if workspace not found
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !metav1.IsControlledBy(workspaceRole, workspace) {
|
||||
workspaceRole.OwnerReferences = nil
|
||||
if err := controllerutil.SetControllerReference(workspace, workspaceRole, scheme.Scheme); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
_, err = c.ksClient.IamV1alpha2().WorkspaceRoles().Update(workspaceRole)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) multiClusterSync(workspaceRole *iamv1alpha2.WorkspaceRole) error {
|
||||
|
||||
if err := c.ensureNotControlledByKubefed(workspaceRole); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
obj, exist, err := c.fedWorkspaceRoleCache.GetByKey(workspaceRole.Name)
|
||||
if !exist {
|
||||
return c.createFederatedWorkspaceRole(workspaceRole)
|
||||
}
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var federatedWorkspaceRole iamv1alpha2.FederatedRole
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRole); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Rules, workspaceRole.Rules) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Labels, workspaceRole.Labels) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Annotations, workspaceRole.Annotations) {
|
||||
|
||||
federatedWorkspaceRole.Spec.Template.Rules = workspaceRole.Rules
|
||||
federatedWorkspaceRole.Spec.Template.Annotations = workspaceRole.Annotations
|
||||
federatedWorkspaceRole.Spec.Template.Labels = workspaceRole.Labels
|
||||
|
||||
return c.updateFederatedGlobalRole(&federatedWorkspaceRole)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) createFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) error {
|
||||
federatedWorkspaceRole := &iamv1alpha2.FederatedRole{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: iamv1alpha2.FedWorkspaceRoleKind,
|
||||
APIVersion: iamv1alpha2.FedWorkspaceRoleResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleResource.Version,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: workspaceRole.Name,
|
||||
},
|
||||
Spec: iamv1alpha2.FederatedRoleSpec{
|
||||
Template: iamv1alpha2.RoleTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: workspaceRole.Labels,
|
||||
Annotations: workspaceRole.Annotations,
|
||||
},
|
||||
Rules: workspaceRole.Rules,
|
||||
},
|
||||
Placement: iamv1alpha2.Placement{
|
||||
ClusterSelector: iamv1alpha2.ClusterSelector{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := controllerutil.SetControllerReference(workspaceRole, federatedWorkspaceRole, scheme.Scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(federatedWorkspaceRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli := c.k8sClient.(*kubernetes.Clientset)
|
||||
err = cli.RESTClient().Post().
|
||||
AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group,
|
||||
iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name)).
|
||||
Body(data).
|
||||
Do().Error()
|
||||
|
||||
if err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) updateFederatedGlobalRole(federatedWorkspaceRole *iamv1alpha2.FederatedRole) error {
|
||||
|
||||
data, err := json.Marshal(federatedWorkspaceRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli := c.k8sClient.(*kubernetes.Clientset)
|
||||
|
||||
err = cli.RESTClient().Put().
|
||||
AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group,
|
||||
iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name,
|
||||
federatedWorkspaceRole.Name)).
|
||||
Body(data).
|
||||
Do().Error()
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) ensureNotControlledByKubefed(workspaceRole *iamv1alpha2.WorkspaceRole) error {
|
||||
if workspaceRole.Labels[constants.KubefedManagedLabel] != "false" {
|
||||
if workspaceRole.Labels == nil {
|
||||
workspaceRole.Labels = make(map[string]string, 0)
|
||||
}
|
||||
workspaceRole = workspaceRole.DeepCopy()
|
||||
workspaceRole.Labels[constants.KubefedManagedLabel] = "false"
|
||||
_, err := c.ksClient.IamV1alpha2().WorkspaceRoles().Update(workspaceRole)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
214 pkg/controller/workspacerole/workspacerole_controller.go Normal file
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerole
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/record"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
|
||||
typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
"reflect"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
const (
|
||||
controllerName = "workspacerole-controller"
|
||||
)
|
||||
|
||||
// Reconciler reconciles a WorkspaceRole object
|
||||
type Reconciler struct {
|
||||
client.Client
|
||||
MultiClusterEnabled bool
|
||||
Logger logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
MaxConcurrentReconciles int
|
||||
}
|
||||
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	if r.Client == nil {
		r.Client = mgr.GetClient()
	}
	if r.Logger == nil {
		r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
	}
	if r.Scheme == nil {
		r.Scheme = mgr.GetScheme()
	}
	if r.Recorder == nil {
		r.Recorder = mgr.GetEventRecorderFor(controllerName)
	}
	if r.MaxConcurrentReconciles <= 0 {
		r.MaxConcurrentReconciles = 1
	}
	return ctrl.NewControllerManagedBy(mgr).
		Named(controllerName).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: r.MaxConcurrentReconciles,
		}).
		For(&iamv1alpha2.WorkspaceRole{}).
		Complete(r)
}
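For orientation, here is a minimal sketch (not part of this PR) of how a reconciler like this is typically wired into a controller-runtime manager; the import path and the MultiClusterEnabled value are assumptions for illustration.

package main

import (
	"k8s.io/client-go/kubernetes/scheme"
	"kubesphere.io/kubesphere/pkg/apis"
	"kubesphere.io/kubesphere/pkg/controller/workspacerole" // assumed import path
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Register KubeSphere API types so the manager's client understands them.
	if err := apis.AddToScheme(scheme.Scheme); err != nil {
		panic(err)
	}
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme.Scheme})
	if err != nil {
		panic(err)
	}
	// MultiClusterEnabled: true is an example value, not taken from the PR.
	if err := (&workspacerole.Reconciler{MultiClusterEnabled: true}).SetupWithManager(mgr); err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}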
|
||||
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	logger := r.Logger.WithValues("workspacerole", req.NamespacedName)
	rootCtx := context.Background()
	workspaceRole := &iamv1alpha2.WorkspaceRole{}
	err := r.Get(rootCtx, req.NamespacedName, workspaceRole)
	if err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// skip reconciliation when the object is controlled by kubefed-controller-manager
	if workspaceRole.Labels[constants.KubefedManagedLabel] == "true" {
		return ctrl.Result{}, nil
	}

	if err := r.bindWorkspace(rootCtx, logger, workspaceRole); err != nil {
		return ctrl.Result{}, err
	}

	if r.MultiClusterEnabled {
		if err = r.multiClusterSync(rootCtx, logger, workspaceRole); err != nil {
			return ctrl.Result{}, err
		}
	}

	r.Recorder.Event(workspaceRole, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
	return ctrl.Result{}, nil
}
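As a reading aid (not from the PR): client.IgnoreNotFound is roughly the helper below, which is why a WorkspaceRole that was deleted between enqueue and reconcile ends the reconcile cleanly instead of being requeued.

// Rough equivalent of client.IgnoreNotFound from controller-runtime:
// drop NotFound errors, pass everything else through so it triggers a requeue.
func ignoreNotFound(err error) error {
	if errors.IsNotFound(err) {
		return nil
	}
	return err
}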
|
||||
|
||||
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
	workspaceName := workspaceRole.Labels[constants.WorkspaceLabelKey]
	if workspaceName == "" {
		return nil
	}
	var workspace tenantv1alpha2.WorkspaceTemplate
	if err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, &workspace); err != nil {
		return client.IgnoreNotFound(err)
	}
	if !metav1.IsControlledBy(workspaceRole, &workspace) {
		workspaceRole = workspaceRole.DeepCopy()
		workspaceRole.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRole.OwnerReferences)
		if err := controllerutil.SetControllerReference(&workspace, workspaceRole, r.Scheme); err != nil {
			logger.Error(err, "set controller reference failed")
			return err
		}
		if err := r.Update(ctx, workspaceRole); err != nil {
			logger.Error(err, "update workspace role failed")
			return err
		}
	}
	return nil
}
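The practical effect of SetControllerReference above is that the WorkspaceTemplate becomes the controller owner of the WorkspaceRole, so deleting the workspace garbage-collects its roles. A sketch of the resulting owner reference follows; the field values are illustrative, not taken from the PR.

// Illustrative only: the controller owner reference added to the WorkspaceRole.
controlled := true
ownerRef := metav1.OwnerReference{
	APIVersion:         tenantv1alpha2.SchemeGroupVersion.String(), // assumed helper on the tenant API package
	Kind:               "WorkspaceTemplate",
	Name:               "workspace1",
	Controller:         &controlled,
	BlockOwnerDeletion: &controlled,
}
_ = ownerRef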
|
||||
|
||||
func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
	if err := r.ensureNotControlledByKubefed(ctx, logger, workspaceRole); err != nil {
		return err
	}
	federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRole{}
	if err := r.Client.Get(ctx, types.NamespacedName{Name: workspaceRole.Name}, federatedWorkspaceRole); err != nil {
		if errors.IsNotFound(err) {
			if federatedWorkspaceRole, err := newFederatedWorkspaceRole(workspaceRole); err != nil {
				logger.Error(err, "create federated workspace role failed")
				return err
			} else {
				if err := r.Create(ctx, federatedWorkspaceRole); err != nil {
					logger.Error(err, "create federated workspace role failed")
					return err
				}
			}
		}
		logger.Error(err, "get federated workspace role failed")
		return err
	}

	if !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Rules, workspaceRole.Rules) ||
		!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Labels, workspaceRole.Labels) {

		federatedWorkspaceRole.Spec.Template.Rules = workspaceRole.Rules
		federatedWorkspaceRole.Spec.Template.Labels = workspaceRole.Labels

		if err := r.Update(ctx, federatedWorkspaceRole); err != nil {
			logger.Error(err, "update federated workspace role failed")
			return err
		}
	}

	return nil
}
|
||||
|
||||
func newFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) (*typesv1beta1.FederatedWorkspaceRole, error) {
	federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRole{
		TypeMeta: metav1.TypeMeta{
			Kind:       typesv1beta1.FederatedWorkspaceRoleKind,
			APIVersion: typesv1beta1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceRole.Name,
		},
		Spec: typesv1beta1.FederatedWorkspaceRoleSpec{
			Template: typesv1beta1.WorkspaceRoleTemplate{
				ObjectMeta: metav1.ObjectMeta{
					Labels: workspaceRole.Labels,
				},
				Rules: workspaceRole.Rules,
			},
			Placement: typesv1beta1.GenericPlacementFields{
				ClusterSelector: &metav1.LabelSelector{},
			},
		},
	}

	if err := controllerutil.SetControllerReference(workspaceRole, federatedWorkspaceRole, scheme.Scheme); err != nil {
		return nil, err
	}

	return federatedWorkspaceRole, nil
}
|
||||
|
||||
func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
	if workspaceRole.Labels[constants.KubefedManagedLabel] != "false" {
		if workspaceRole.Labels == nil {
			workspaceRole.Labels = make(map[string]string)
		}
		workspaceRole = workspaceRole.DeepCopy()
		workspaceRole.Labels[constants.KubefedManagedLabel] = "false"
		if err := r.Update(ctx, workspaceRole); err != nil {
			logger.Error(err, "update kubefed managed label failed")
			return err
		}
	}
	return nil
}
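Pinning the label to "false" opts the object out of kubefed propagation, so kubefed-controller-manager does not fight this controller over it. A sketch of the resulting label follows; the actual key comes from constants.KubefedManagedLabel, and "kubefed.io/managed" is an assumption here.

// Illustrative only: what the opt-out label looks like on the object.
labels := map[string]string{
	"kubefed.io/managed": "false", // assumed value of constants.KubefedManagedLabel
}
_ = labels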
|
||||
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerole
|
||||
|
||||
import (
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"path/filepath"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var k8sClient client.Client
|
||||
var k8sManager ctrl.Manager
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
func TestWorkspaceRoleController(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"WorkspaceRole Controller Test Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme.Scheme,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&Reconciler{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
gexec.KillAndWait(5 * time.Second)
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
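Specs in this package talk to the API server only through the shared k8sClient started above; a minimal usage sketch follows (not part of the PR, and it assumes the context and iamv1alpha2 imports already used by the sibling test file).

// Illustrative spec: list WorkspaceRoles through the suite's shared client.
var _ = It("lists workspace roles", func() {
	roles := &iamv1alpha2.WorkspaceRoleList{}
	Expect(k8sClient.List(context.Background(), roles)).Should(Succeed())
})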
|
||||
@@ -0,0 +1,93 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerole
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ = Describe("WorkspaceRole", func() {
|
||||
|
||||
const timeout = time.Second * 30
|
||||
const interval = time.Second * 1
|
||||
|
||||
workspace := &tenantv1alpha2.WorkspaceTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "workspace1",
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
// Create workspace
|
||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||
})
|
||||
|
||||
// Add tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test the Kubernetes API server, which isn't the goal here.
|
||||
Context("WorkspaceRole Controller", func() {
|
||||
It("Should create successfully", func() {
|
||||
workspaceAdmin := &iamv1alpha2.WorkspaceRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-admin", workspace.Name),
|
||||
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{},
|
||||
}
|
||||
|
||||
// Create workspace role
|
||||
Expect(k8sClient.Create(context.Background(), workspaceAdmin)).Should(Succeed())
|
||||
|
||||
By("Expecting to create workspace role successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspaceAdmin.Name}, workspaceAdmin)
|
||||
return !workspaceAdmin.CreationTimestamp.IsZero()
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
By("Expecting to set owner reference successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspaceAdmin.Name}, workspaceAdmin)
|
||||
return len(workspaceAdmin.OwnerReferences) > 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, workspace)).Should(Succeed())
|
||||
|
||||
controlled := true
|
||||
expectedOwnerReference := metav1.OwnerReference{
|
||||
Kind: workspace.Kind,
|
||||
APIVersion: workspace.APIVersion,
|
||||
UID: workspace.UID,
|
||||
Name: workspace.Name,
|
||||
Controller: &controlled,
|
||||
BlockOwnerDeletion: &controlled,
|
||||
}
|
||||
|
||||
By("Expecting to bind workspace successfully")
|
||||
Expect(workspaceAdmin.OwnerReferences).To(ContainElement(expectedOwnerReference))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -17,473 +17,202 @@ limitations under the License.
|
||||
package workspacerolebinding
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"context"
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
|
||||
tenantv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha2"
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
tenantv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha2"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
|
||||
typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
"reflect"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
|
||||
successSynced = "Synced"
|
||||
// is synced successfully
|
||||
messageResourceSynced = "WorkspaceRoleBinding synced successfully"
|
||||
controllerName = "workspacerolebinding-controller"
|
||||
controllerName = "workspacerolebinding-controller"
|
||||
)
|
||||
|
||||
type Controller struct {
|
||||
k8sClient kubernetes.Interface
|
||||
ksClient kubesphere.Interface
|
||||
workspaceRoleBindingInformer iamv1alpha2informers.WorkspaceRoleBindingInformer
|
||||
workspaceRoleBindingLister iamv1alpha2listers.WorkspaceRoleBindingLister
|
||||
workspaceRoleBindingSynced cache.InformerSynced
|
||||
fedWorkspaceRoleBindingCache cache.Store
|
||||
fedWorkspaceRoleBindingCacheController cache.Controller
|
||||
workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer
|
||||
workspaceTemplateLister tenantv1alpha2listers.WorkspaceTemplateLister
|
||||
workspaceTemplateSynced cache.InformerSynced
|
||||
multiClusterEnabled bool
|
||||
// workqueue is a rate limited work queue. This is used to queue work to be
|
||||
// processed instead of performing it as soon as a change happens. This
|
||||
// means we can ensure we only process a fixed amount of resources at a
|
||||
// time, and makes it easy to ensure we are never processing the same item
|
||||
// simultaneously in two different workers.
|
||||
workqueue workqueue.RateLimitingInterface
|
||||
// recorder is an event recorder for recording Event resources to the
|
||||
// Kubernetes API.
|
||||
recorder record.EventRecorder
|
||||
// Reconciler reconciles a WorkspaceRoleBinding object
|
||||
type Reconciler struct {
|
||||
client.Client
|
||||
Logger logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
MaxConcurrentReconciles int
|
||||
MultiClusterEnabled bool
|
||||
}
|
||||
|
||||
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleBindingInformer iamv1alpha2informers.WorkspaceRoleBindingInformer,
|
||||
fedWorkspaceRoleBindingCache cache.Store, fedWorkspaceRoleBindingCacheController cache.Controller, workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer, multiClusterEnabled bool) *Controller {
|
||||
// Create event broadcaster
|
||||
// Add sample-controller types to the default Kubernetes Scheme so Events can be
|
||||
// logged for sample-controller types.
|
||||
|
||||
klog.V(4).Info("Creating event broadcaster")
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(klog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
|
||||
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
|
||||
ctl := &Controller{
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
workspaceRoleBindingInformer: workspaceRoleBindingInformer,
|
||||
workspaceRoleBindingLister: workspaceRoleBindingInformer.Lister(),
|
||||
workspaceRoleBindingSynced: workspaceRoleBindingInformer.Informer().HasSynced,
|
||||
fedWorkspaceRoleBindingCache: fedWorkspaceRoleBindingCache,
|
||||
fedWorkspaceRoleBindingCacheController: fedWorkspaceRoleBindingCacheController,
|
||||
workspaceTemplateInformer: workspaceTemplateInformer,
|
||||
workspaceTemplateLister: workspaceTemplateInformer.Lister(),
|
||||
workspaceTemplateSynced: workspaceTemplateInformer.Informer().HasSynced,
|
||||
multiClusterEnabled: multiClusterEnabled,
|
||||
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRoleBinding"),
|
||||
recorder: recorder,
|
||||
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if r.Client == nil {
|
||||
r.Client = mgr.GetClient()
|
||||
}
|
||||
klog.Info("Setting up event handlers")
|
||||
workspaceRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: ctl.enqueueWorkspaceRoleBinding,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
ctl.enqueueWorkspaceRoleBinding(new)
|
||||
},
|
||||
DeleteFunc: ctl.enqueueWorkspaceRoleBinding,
|
||||
})
|
||||
return ctl
|
||||
if r.Logger == nil {
|
||||
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
|
||||
}
|
||||
if r.Scheme == nil {
|
||||
r.Scheme = mgr.GetScheme()
|
||||
}
|
||||
if r.Recorder == nil {
|
||||
r.Recorder = mgr.GetEventRecorderFor(controllerName)
|
||||
}
|
||||
if r.MaxConcurrentReconciles <= 0 {
|
||||
r.MaxConcurrentReconciles = 1
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named(controllerName).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: r.MaxConcurrentReconciles,
|
||||
}).
|
||||
For(&iamv1alpha2.WorkspaceRoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer c.workqueue.ShutDown()
|
||||
|
||||
// Start the informer factories to begin populating the informer caches
|
||||
klog.Info("Starting WorkspaceRoleBinding controller")
|
||||
|
||||
// Wait for the caches to be synced before starting workers
|
||||
klog.Info("Waiting for informer caches to sync")
|
||||
|
||||
synced := make([]cache.InformerSynced, 0)
|
||||
synced = append(synced, c.workspaceRoleBindingSynced, c.workspaceTemplateSynced)
|
||||
if c.multiClusterEnabled {
|
||||
synced = append(synced, c.fedWorkspaceRoleBindingCacheController.HasSynced)
|
||||
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspacerolebindings,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
|
||||
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := r.Logger.WithValues("workspacerolebinding", req.NamespacedName)
|
||||
rootCtx := context.Background()
|
||||
workspaceRoleBinding := &iamv1alpha2.WorkspaceRoleBinding{}
|
||||
if err := r.Get(rootCtx, req.NamespacedName, workspaceRoleBinding); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
|
||||
return fmt.Errorf("failed to wait for caches to sync")
|
||||
// skip reconciliation when the object is controlled by kubefed-controller-manager
|
||||
if workspaceRoleBinding.Labels[constants.KubefedManagedLabel] == "true" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
klog.Info("Starting workers")
|
||||
// Launch two workers to process Foo resources
|
||||
for i := 0; i < threadiness; i++ {
|
||||
go wait.Until(c.runWorker, time.Second, stopCh)
|
||||
if err := r.bindWorkspace(rootCtx, logger, workspaceRoleBinding); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
klog.Info("Started workers")
|
||||
<-stopCh
|
||||
klog.Info("Shutting down workers")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) enqueueWorkspaceRoleBinding(obj interface{}) {
|
||||
var key string
|
||||
var err error
|
||||
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
c.workqueue.Add(key)
|
||||
}
|
||||
|
||||
func (c *Controller) runWorker() {
|
||||
for c.processNextWorkItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) processNextWorkItem() bool {
|
||||
obj, shutdown := c.workqueue.Get()
|
||||
|
||||
if shutdown {
|
||||
return false
|
||||
}
|
||||
|
||||
// We wrap this block in a func so we can defer c.workqueue.Done.
|
||||
err := func(obj interface{}) error {
|
||||
// We call Done here so the workqueue knows we have finished
|
||||
// processing this item. We also must remember to call Forget if we
|
||||
// do not want this work item being re-queued. For example, we do
|
||||
// not call Forget if a transient error occurs, instead the item is
|
||||
// put back on the workqueue and attempted again after a back-off
|
||||
// period.
|
||||
defer c.workqueue.Done(obj)
|
||||
var key string
|
||||
var ok bool
|
||||
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
|
||||
if key, ok = obj.(string); !ok {
|
||||
// As the item in the workqueue is actually invalid, we call
|
||||
// Forget here else we'd go into a loop of attempting to
|
||||
// process a work item that is invalid.
|
||||
c.workqueue.Forget(obj)
|
||||
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
|
||||
return nil
|
||||
}
|
||||
// Run the reconcile, passing it the namespace/name string of the
|
||||
// Foo resource to be synced.
|
||||
if err := c.reconcile(key); err != nil {
|
||||
// Put the item back on the workqueue to handle any transient errors.
|
||||
c.workqueue.AddRateLimited(key)
|
||||
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
|
||||
}
|
||||
// Finally, if no error occurs we Forget this item so it does not
|
||||
// get queued again until another change happens.
|
||||
c.workqueue.Forget(obj)
|
||||
klog.Infof("Successfully synced %s:%s", "key", key)
|
||||
return nil
|
||||
}(obj)
|
||||
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// syncHandler compares the actual state with the desired, and attempts to
|
||||
// converge the two. It then updates the Status block of the Foo resource
|
||||
// with the current status of the resource.
|
||||
func (c *Controller) reconcile(key string) error {
|
||||
|
||||
workspaceRoleBinding, err := c.workspaceRoleBindingLister.Get(key)
if err != nil {
// The workspace role binding may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("workspacerolebinding '%s' in work queue no longer exists", key))
return nil
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = c.bindWorkspace(workspaceRoleBinding); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if c.multiClusterEnabled {
|
||||
if err = c.multiClusterSync(workspaceRoleBinding); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
if r.MultiClusterEnabled {
|
||||
if err := r.multiClusterSync(rootCtx, logger, workspaceRoleBinding); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
c.recorder.Event(workspaceRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced)
|
||||
return nil
|
||||
r.Recorder.Event(workspaceRoleBinding, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (c *Controller) Start(stopCh <-chan struct{}) error {
|
||||
return c.Run(4, stopCh)
|
||||
}
|
||||
|
||||
func (c *Controller) bindWorkspace(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
|
||||
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
workspaceName := workspaceRoleBinding.Labels[constants.WorkspaceLabelKey]
|
||||
|
||||
if workspaceName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
workspace, err := c.workspaceTemplateLister.Get(workspaceName)
|
||||
|
||||
if err != nil {
|
||||
workspace := &tenantv1alpha2.WorkspaceTemplate{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, workspace); err != nil {
|
||||
// skip if workspace not found
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// owner reference does not match the workspace label
|
||||
if !metav1.IsControlledBy(workspaceRoleBinding, workspace) {
|
||||
workspaceRoleBinding.OwnerReferences = nil
|
||||
if err := controllerutil.SetControllerReference(workspace, workspaceRoleBinding, scheme.Scheme); err != nil {
|
||||
klog.Error(err)
|
||||
workspaceRoleBinding := workspaceRoleBinding.DeepCopy()
|
||||
workspaceRoleBinding.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRoleBinding.OwnerReferences)
|
||||
if err := controllerutil.SetControllerReference(workspace, workspaceRoleBinding, r.Scheme); err != nil {
|
||||
logger.Error(err, "set controller reference failed")
|
||||
return err
|
||||
}
|
||||
_, err = c.ksClient.IamV1alpha2().WorkspaceRoleBindings().Update(workspaceRoleBinding)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
logger.V(4).Info("update owner reference")
|
||||
if err := r.Update(ctx, workspaceRoleBinding); err != nil {
|
||||
logger.Error(err, "update owner reference failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) multiClusterSync(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
|
||||
if err := c.ensureNotControlledByKubefed(workspaceRoleBinding); err != nil {
|
||||
klog.Error(err)
|
||||
func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
if err := r.ensureNotControlledByKubefed(ctx, logger, workspaceRoleBinding); err != nil {
|
||||
return err
|
||||
}
|
||||
federatedWorkspaceRoleBinding := &typesv1beta1.FederatedWorkspaceRoleBinding{}
|
||||
if err := r.Client.Get(ctx, types.NamespacedName{Name: workspaceRoleBinding.Name}, federatedWorkspaceRoleBinding); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
if federatedWorkspaceRoleBinding, err := newFederatedWorkspaceRole(workspaceRoleBinding); err != nil {
|
||||
logger.Error(err, "generate federated workspace role binding failed")
|
||||
return err
|
||||
} else {
|
||||
if err := r.Create(ctx, federatedWorkspaceRoleBinding); err != nil {
|
||||
logger.Error(err, "create federated workspace role binding failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.Error(err, "get federated workspace role binding failed")
|
||||
return err
|
||||
}
|
||||
|
||||
obj, exist, err := c.fedWorkspaceRoleBindingCache.GetByKey(workspaceRoleBinding.Name)
|
||||
if !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.RoleRef, workspaceRoleBinding.RoleRef) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Subjects, workspaceRoleBinding.Subjects) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Labels, workspaceRoleBinding.Labels) {
|
||||
|
||||
if !exist {
|
||||
return c.createFederatedWorkspaceRoleBinding(workspaceRoleBinding)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var federatedWorkspaceRoleBinding iamv1alpha2.FederatedRoleBinding
|
||||
|
||||
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRoleBinding)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Subjects, workspaceRoleBinding.Subjects) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.RoleRef, workspaceRoleBinding.RoleRef) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Labels, workspaceRoleBinding.Labels) ||
|
||||
!reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Annotations, workspaceRoleBinding.Annotations) {
|
||||
|
||||
federatedWorkspaceRoleBinding.Spec.Template.Subjects = workspaceRoleBinding.Subjects
|
||||
federatedWorkspaceRoleBinding.Spec.Template.RoleRef = workspaceRoleBinding.RoleRef
|
||||
federatedWorkspaceRoleBinding.Spec.Template.Annotations = workspaceRoleBinding.Annotations
|
||||
federatedWorkspaceRoleBinding.Spec.Template.Subjects = workspaceRoleBinding.Subjects
|
||||
federatedWorkspaceRoleBinding.Spec.Template.Labels = workspaceRoleBinding.Labels
|
||||
|
||||
return c.updateFederatedWorkspaceRoleBinding(&federatedWorkspaceRoleBinding)
|
||||
if err := r.Update(ctx, federatedWorkspaceRoleBinding); err != nil {
|
||||
logger.Error(err, "update federated workspace role failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) relateToClusterAdmin(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
|
||||
username := findExpectUsername(workspaceRoleBinding)
|
||||
|
||||
// unexpected
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
clusterRoleBinding := &rbacv1.ClusterRoleBinding{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin),
|
||||
},
|
||||
Subjects: ensureSubjectAPIVersionIsValid(workspaceRoleBinding.Subjects),
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: iamv1alpha2.ResourceKindClusterRole,
|
||||
Name: iamv1alpha2.ClusterAdmin,
|
||||
},
|
||||
}
|
||||
|
||||
err := controllerutil.SetControllerReference(workspaceRoleBinding, clusterRoleBinding, scheme.Scheme)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding)
|
||||
|
||||
if err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// binding only one user is expected
|
||||
func findExpectUsername(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) string {
|
||||
for _, subject := range workspaceRoleBinding.Subjects {
|
||||
if subject.Kind == iamv1alpha2.ResourceKindUser {
|
||||
return subject.Name
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *Controller) createFederatedWorkspaceRoleBinding(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
federatedWorkspaceRoleBinding := &iamv1alpha2.FederatedRoleBinding{
|
||||
func newFederatedWorkspaceRole(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) (*typesv1beta1.FederatedWorkspaceRoleBinding, error) {
|
||||
federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRoleBinding{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: iamv1alpha2.FedWorkspaceRoleBindingKind,
|
||||
APIVersion: iamv1alpha2.FedWorkspaceRoleBindingResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleBindingResource.Version,
|
||||
Kind: typesv1beta1.FederatedWorkspaceRoleBindingKind,
|
||||
APIVersion: typesv1beta1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: workspaceRoleBinding.Name,
|
||||
},
|
||||
Spec: iamv1alpha2.FederatedRoleBindingSpec{
|
||||
Template: iamv1alpha2.RoleBindingTemplate{
|
||||
Spec: typesv1beta1.FederatedWorkspaceRoleBindingSpec{
|
||||
Template: typesv1beta1.WorkspaceRoleBindingTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: workspaceRoleBinding.Labels,
|
||||
Annotations: workspaceRoleBinding.Annotations,
|
||||
Labels: workspaceRoleBinding.Labels,
|
||||
},
|
||||
Subjects: workspaceRoleBinding.Subjects,
|
||||
RoleRef: workspaceRoleBinding.RoleRef,
|
||||
Subjects: workspaceRoleBinding.Subjects,
|
||||
},
|
||||
Placement: iamv1alpha2.Placement{
|
||||
ClusterSelector: iamv1alpha2.ClusterSelector{},
|
||||
Placement: typesv1beta1.GenericPlacementFields{
|
||||
ClusterSelector: &metav1.LabelSelector{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRoleBinding, scheme.Scheme)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
if err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRole, scheme.Scheme); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := json.Marshal(federatedWorkspaceRoleBinding)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli := c.k8sClient.(*kubernetes.Clientset)
|
||||
err = cli.RESTClient().Post().
|
||||
AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group,
|
||||
iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name)).
|
||||
Body(data).
|
||||
Do().Error()
|
||||
|
||||
if err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return federatedWorkspaceRole, nil
|
||||
}
|
||||
|
||||
func (c *Controller) updateFederatedWorkspaceRoleBinding(federatedWorkspaceRoleBinding *iamv1alpha2.FederatedRoleBinding) error {
|
||||
|
||||
data, err := json.Marshal(federatedWorkspaceRoleBinding)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cli := c.k8sClient.(*kubernetes.Clientset)
|
||||
|
||||
err = cli.RESTClient().Put().
|
||||
AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group,
|
||||
iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name,
|
||||
federatedWorkspaceRoleBinding.Name)).
|
||||
Body(data).
|
||||
Do().Error()
|
||||
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) ensureNotControlledByKubefed(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger logr.Logger, workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
|
||||
if workspaceRoleBinding.Labels[constants.KubefedManagedLabel] != "false" {
|
||||
if workspaceRoleBinding.Labels == nil {
|
||||
workspaceRoleBinding.Labels = make(map[string]string, 0)
|
||||
workspaceRoleBinding.Labels = make(map[string]string)
|
||||
}
|
||||
workspaceRoleBinding = workspaceRoleBinding.DeepCopy()
|
||||
workspaceRoleBinding.Labels[constants.KubefedManagedLabel] = "false"
|
||||
_, err := c.ksClient.IamV1alpha2().WorkspaceRoleBindings().Update(workspaceRoleBinding)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
logger.V(4).Info("update kubefed managed label")
|
||||
if err := r.Update(ctx, workspaceRoleBinding); err != nil {
|
||||
logger.Error(err, "update kubefed managed label failed")
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureSubjectAPIVersionIsValid(subjects []rbacv1.Subject) []rbacv1.Subject {
|
||||
validSubjects := make([]rbacv1.Subject, 0)
|
||||
for _, subject := range subjects {
|
||||
if subject.Kind == iamv1alpha2.ResourceKindUser {
|
||||
validSubject := rbacv1.Subject{
|
||||
Kind: iamv1alpha2.ResourceKindUser,
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Name: subject.Name,
|
||||
}
|
||||
validSubjects = append(validSubjects, validSubject)
|
||||
}
|
||||
}
|
||||
return validSubjects
|
||||
}
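ensureSubjectAPIVersionIsValid keeps only User subjects and normalizes their API group before relateToClusterAdmin copies them into the cluster-admin ClusterRoleBinding. A small usage sketch with illustrative values:

// Illustrative only: non-User subjects are dropped, User subjects are rewritten
// with the rbac.authorization.k8s.io API group.
subjects := []rbacv1.Subject{
	{Kind: iamv1alpha2.ResourceKindUser, Name: "admin"},
	{Kind: "ServiceAccount", Name: "default", Namespace: "kube-system"},
}
valid := ensureSubjectAPIVersionIsValid(subjects)
// valid now holds a single subject with Kind iamv1alpha2.ResourceKindUser,
// APIGroup "rbac.authorization.k8s.io", and Name "admin".
_ = valid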
|
||||
|
||||
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerolebinding
|
||||
|
||||
import (
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"path/filepath"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var k8sClient client.Client
|
||||
var k8sManager ctrl.Manager
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
func TestWorkspaceRoleBindingController(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecsWithDefaultAndCustomReporters(t,
|
||||
"WorkspaceRoleBinding Controller Test Suite",
|
||||
[]Reporter{printer.NewlineReporter{}})
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme.Scheme,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&Reconciler{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
gexec.KillAndWait(5 * time.Second)
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package workspacerolebinding
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ = Describe("WorkspaceRoleBinding", func() {
|
||||
|
||||
const timeout = time.Second * 30
|
||||
const interval = time.Second * 1
|
||||
|
||||
workspace := &tenantv1alpha2.WorkspaceTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "workspace1",
|
||||
},
|
||||
}
|
||||
BeforeEach(func() {
|
||||
// Create workspace
|
||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||
})
|
||||
|
||||
// Add tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test the Kubernetes API server, which isn't the goal here.
|
||||
Context("WorkspaceRoleBinding Controller", func() {
|
||||
It("Should create successfully", func() {
|
||||
workspaceAdminBinding := &iamv1alpha2.WorkspaceRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("admin-%s-admin", workspace.Name),
|
||||
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
Kind: iamv1alpha2.FedWorkspaceRoleKind,
|
||||
Name: "workspace1-admin",
|
||||
},
|
||||
}
|
||||
|
||||
// Create workspace role binding
|
||||
Expect(k8sClient.Create(context.Background(), workspaceAdminBinding)).Should(Succeed())
|
||||
|
||||
By("Expecting to create workspace role successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspaceAdminBinding.Name}, workspaceAdminBinding)
|
||||
return !workspaceAdminBinding.CreationTimestamp.IsZero()
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
By("Expecting to set owner reference successfully")
|
||||
Eventually(func() bool {
|
||||
k8sClient.Get(context.Background(), types.NamespacedName{Name: workspaceAdminBinding.Name}, workspaceAdminBinding)
|
||||
return len(workspaceAdminBinding.OwnerReferences) > 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: workspace.Name}, workspace)).Should(Succeed())
|
||||
|
||||
controlled := true
|
||||
expectedOwnerReference := metav1.OwnerReference{
|
||||
Kind: workspace.Kind,
|
||||
APIVersion: workspace.APIVersion,
|
||||
UID: workspace.UID,
|
||||
Name: workspace.Name,
|
||||
Controller: &controlled,
|
||||
BlockOwnerDeletion: &controlled,
|
||||
}
|
||||
|
||||
By("Expecting to bind workspace successfully")
|
||||
Expect(workspaceAdminBinding.OwnerReferences).To(ContainElement(expectedOwnerReference))
|
||||
})
|
||||
})
|
||||
})
@@ -18,463 +18,310 @@ package workspacetemplate

import (
	"bytes"
	"context"
	"fmt"
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	tenantv1alpha1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
	tenantv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha2"
	typesv1beta1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/types/v1beta1"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
	tenantv1alpha1listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha1"
	tenantv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha2"
	typesv1beta1listers "kubesphere.io/kubesphere/pkg/client/listers/types/v1beta1"
	"kubesphere.io/kubesphere/pkg/constants"
	controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
	"reflect"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"time"
)

const (
	// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
	successSynced = "Synced"
	// is synced successfully
	messageResourceSynced = "WorkspaceTemplate synced successfully"
	controllerName = "workspacetemplate-controller"
	controllerName = "workspacetemplate-controller"
)

type controller struct {
	k8sClient kubernetes.Interface
	ksClient kubesphere.Interface
	workspaceTemplateLister tenantv1alpha2listers.WorkspaceTemplateLister
	workspaceTemplateSynced cache.InformerSynced
	workspaceRoleLister iamv1alpha2listers.WorkspaceRoleLister
	workspaceRoleSynced cache.InformerSynced
	roleBaseLister iamv1alpha2listers.RoleBaseLister
	roleBaseSynced cache.InformerSynced
	workspaceLister tenantv1alpha1listers.WorkspaceLister
	workspaceSynced cache.InformerSynced
	federatedWorkspaceLister typesv1beta1listers.FederatedWorkspaceLister
	federatedWorkspaceSynced cache.InformerSynced
	multiClusterEnabled bool
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
// Reconciler reconciles a WorkspaceRoleBinding object
type Reconciler struct {
	client.Client
	Logger logr.Logger
	Recorder record.EventRecorder
	MaxConcurrentReconciles int
	MultiClusterEnabled bool
}

func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
	workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer,
	workspaceInformer tenantv1alpha1informers.WorkspaceInformer,
	roleBaseInformer iamv1alpha2informers.RoleBaseInformer,
	workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer,
	federatedWorkspaceInformer typesv1beta1informers.FederatedWorkspaceInformer,
	multiClusterEnabled bool) *controller {

	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
	ctl := &controller{
		k8sClient: k8sClient,
		ksClient: ksClient,
		workspaceTemplateLister: workspaceTemplateInformer.Lister(),
		workspaceTemplateSynced: workspaceTemplateInformer.Informer().HasSynced,
		workspaceLister: workspaceInformer.Lister(),
		workspaceSynced: workspaceInformer.Informer().HasSynced,
		workspaceRoleLister: workspaceRoleInformer.Lister(),
		workspaceRoleSynced: workspaceRoleInformer.Informer().HasSynced,
		roleBaseLister: roleBaseInformer.Lister(),
		roleBaseSynced: roleBaseInformer.Informer().HasSynced,
		multiClusterEnabled: multiClusterEnabled,
		workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceTemplate"),
		recorder: recorder,
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	if r.Client == nil {
		r.Client = mgr.GetClient()
	}

	if multiClusterEnabled {
		ctl.federatedWorkspaceLister = federatedWorkspaceInformer.Lister()
		ctl.federatedWorkspaceSynced = federatedWorkspaceInformer.Informer().HasSynced
	if r.Logger == nil {
		r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
	}

	klog.Info("Setting up event handlers")
	workspaceTemplateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.enqueueWorkspaceTemplate,
		UpdateFunc: func(old, new interface{}) {
			ctl.enqueueWorkspaceTemplate(new)
		},
		DeleteFunc: ctl.enqueueWorkspaceTemplate,
	})
	return ctl
	if r.Recorder == nil {
		r.Recorder = mgr.GetEventRecorderFor(controllerName)
	}
	if r.MaxConcurrentReconciles <= 0 {
		r.MaxConcurrentReconciles = 1
	}
	return ctrl.NewControllerManagedBy(mgr).
		Named(controllerName).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: r.MaxConcurrentReconciles,
		}).
		For(&tenantv1alpha2.WorkspaceTemplate{}).
		Complete(r)
}
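SetupWithManager fills in any dependencies left nil (client, logger, recorder), defaults the worker count, and registers the reconciler for WorkspaceTemplate objects. A minimal sketch of how it might be wired from the controller-manager; the mgr variable, the multiClusterEnabled flag and the package alias are assumptions for illustration, not part of this change:

// Hypothetical wiring sketch from a controller-manager setup function.
r := &workspacetemplate.Reconciler{
	MaxConcurrentReconciles: 2, // assumed value
	MultiClusterEnabled:     multiClusterEnabled,
}
if err := r.SetupWithManager(mgr); err != nil {
	klog.Fatalf("unable to set up %s: %v", "workspacetemplate-controller", err)
}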

func (c *controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	// Start the informer factories to begin populating the informer caches
	klog.Info("Starting WorkspaceTemplate controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	synced := make([]cache.InformerSynced, 0)
	synced = append(synced, c.workspaceTemplateSynced, c.workspaceSynced, c.workspaceRoleSynced, c.roleBaseSynced)
	if c.multiClusterEnabled {
		synced = append(synced, c.federatedWorkspaceSynced)
	}
	if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	logger := r.Logger.WithValues("workspacetemplate", req.NamespacedName)
	rootCtx := context.Background()
	workspaceTemplate := &tenantv1alpha2.WorkspaceTemplate{}
	if err := r.Get(rootCtx, req.NamespacedName, workspaceTemplate); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	klog.Info("Starting workers")
	// Launch two workers to process Foo resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}

func (c *controller) enqueueWorkspaceTemplate(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		if err := c.reconcile(key); err != nil {
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced %s:%s", "key", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
func (c *controller) reconcile(key string) error {
	workspaceTemplate, err := c.workspaceTemplateLister.Get(key)
	if err != nil {
		// The user may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("workspace template '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	if err = c.initRoles(workspaceTemplate); err != nil {
		klog.Error(err)
		return err
	}

	if err = c.initManagerRoleBinding(workspaceTemplate); err != nil {
		klog.Error(err)
		return err
	}

	if c.multiClusterEnabled {
		if err = c.multiClusterSync(workspaceTemplate); err != nil {
			klog.Error(err)
			return err
	if r.MultiClusterEnabled {
		if err := r.multiClusterSync(rootCtx, logger, workspaceTemplate); err != nil {
			return ctrl.Result{}, err
		}
	} else {
		if err = c.sync(workspaceTemplate); err != nil {
			klog.Error(err)
			return err
		if err := r.singleClusterSync(rootCtx, logger, workspaceTemplate); err != nil {
			return ctrl.Result{}, err
		}
	}

	c.recorder.Event(workspaceTemplate, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
	if err := r.initWorkspaceRoles(rootCtx, logger, workspaceTemplate); err != nil {
		return ctrl.Result{}, err
	}
	if err := r.initManagerRoleBinding(rootCtx, logger, workspaceTemplate); err != nil {
		return ctrl.Result{}, err
	}
	r.Recorder.Event(workspaceTemplate, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
	return ctrl.Result{}, nil
}
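Taken together, the new Reconcile flow is: fetch the WorkspaceTemplate, run multiClusterSync or singleClusterSync depending on MultiClusterEnabled, initialize the workspace roles and the manager role binding, then record the Synced event. Controller-runtime drives it with a request carrying only the object key; a minimal illustrative call shape (the template name is an assumption):

// WorkspaceTemplate is cluster scoped, so the request carries an empty namespace.
result, err := r.Reconcile(ctrl.Request{NamespacedName: types.NamespacedName{Name: "demo-workspace"}})
_ = result
_ = err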

func (c *controller) Start(stopCh <-chan struct{}) error {
	return c.Run(4, stopCh)
}

func (c *controller) multiClusterSync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	// multi cluster environment, synchronize workspaces with kubefed
	federatedWorkspace, err := c.federatedWorkspaceLister.Get(workspaceTemplate.Name)
	if err != nil {
		// create federatedworkspace if not found
func (r *Reconciler) singleClusterSync(ctx context.Context, logger logr.Logger, workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	workspace := &tenantv1alpha1.Workspace{}
	if err := r.Get(ctx, types.NamespacedName{Name: workspaceTemplate.Name}, workspace); err != nil {
		if errors.IsNotFound(err) {
			return c.createFederatedWorkspace(workspaceTemplate)
		}
		klog.Error(err)
		return err
	}
	// update spec
	if !reflect.DeepEqual(federatedWorkspace.Spec, workspaceTemplate.Spec) {
		federatedWorkspace.Spec = workspaceTemplate.Spec
		if err = c.updateFederatedWorkspace(federatedWorkspace); err != nil {
			klog.Error(err)
			return err
		}
	}

	return nil
}

func (c *controller) createFederatedWorkspace(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	federatedWorkspace := &typesv1beta1.FederatedWorkspace{
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceTemplate.Name,
		},
		Spec: workspaceTemplate.Spec,
	}

	if err := controllerutil.SetControllerReference(workspaceTemplate, federatedWorkspace, scheme.Scheme); err != nil {
		return err
	}

	if _, err := c.ksClient.TypesV1beta1().FederatedWorkspaces().Create(federatedWorkspace); err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		klog.Error(err)
		return err
	}

	return nil
}

func (c *controller) sync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	workspace, err := c.workspaceLister.Get(workspaceTemplate.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			return c.createWorkspace(workspaceTemplate)
		}
		klog.Error(err)
		return err
	}

	if !reflect.DeepEqual(workspace.Spec, workspaceTemplate.Spec.Template.Spec) ||
		!reflect.DeepEqual(workspace.Labels, workspaceTemplate.Spec.Template.Labels) ||
		!reflect.DeepEqual(workspace.Annotations, workspaceTemplate.Spec.Template.Annotations) {

		workspace = workspace.DeepCopy()
		workspace.Spec = workspaceTemplate.Spec.Template.Spec
		workspace.Labels = workspaceTemplate.Spec.Template.Labels
		workspace.Annotations = workspaceTemplate.Spec.Template.Annotations

		return c.updateWorkspace(workspace)
	}

	return nil
}

func (c *controller) createWorkspace(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	workspace := &tenantv1alpha1.Workspace{
		ObjectMeta: metav1.ObjectMeta{
			Name: workspaceTemplate.Name,
			Labels: workspaceTemplate.Spec.Template.Labels,
			Annotations: workspaceTemplate.Spec.Template.Annotations,
		},
		Spec: workspaceTemplate.Spec.Template.Spec,
	}

	err := controllerutil.SetControllerReference(workspaceTemplate, workspace, scheme.Scheme)
	if err != nil {
		return err
	}

	_, err = c.ksClient.TenantV1alpha1().Workspaces().Create(workspace)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		klog.Error(err)
		return err
	}

	return nil
}

func (c *controller) updateWorkspace(workspace *tenantv1alpha1.Workspace) error {
	_, err := c.ksClient.TenantV1alpha1().Workspaces().Update(workspace)
	if err != nil {
		klog.Error(err)
		return err
	}
	return nil
}

func (c *controller) initRoles(workspace *tenantv1alpha2.WorkspaceTemplate) error {
	roleBases, err := c.roleBaseLister.List(labels.Everything())
	if err != nil {
		klog.Error(err)
		return err
	}
	for _, roleBase := range roleBases {
		var role iamv1alpha2.WorkspaceRole
		if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil && role.Kind == iamv1alpha2.ResourceKindWorkspaceRole {
			roleName := fmt.Sprintf("%s-%s", workspace.Name, role.Name)
			if role.Labels == nil {
				role.Labels = make(map[string]string, 0)
			}
			// make sure workspace label always exist
			role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
			role.Name = roleName
			old, err := c.workspaceRoleLister.Get(roleName)
			if err != nil {
				if errors.IsNotFound(err) {
					_, err = c.ksClient.IamV1alpha2().WorkspaceRoles().Create(&role)
					if err != nil {
						klog.Error(err)
						return err
					}
					continue
				}
			}
			if !reflect.DeepEqual(role.Labels, old.Labels) ||
				!reflect.DeepEqual(role.Annotations, old.Annotations) ||
				!reflect.DeepEqual(role.Rules, old.Rules) {
				updated := old.DeepCopy()
				updated.Labels = role.Labels
				updated.Annotations = role.Annotations
				updated.Rules = role.Rules
				_, err = c.ksClient.IamV1alpha2().WorkspaceRoles().Update(updated)
				if err != nil {
					klog.Error(err)
		if workspace, err := newWorkspace(workspaceTemplate); err != nil {
			logger.Error(err, "generate workspace failed")
			return err
		} else {
			if err := r.Create(ctx, workspace); err != nil {
				logger.Error(err, "create workspace failed")
				return err
			}
		}
	}
		logger.Error(err, "get workspace failed")
		return err
	}
	if !reflect.DeepEqual(workspace.Spec, workspaceTemplate.Spec.Template.Spec) ||
		!reflect.DeepEqual(workspace.Labels, workspaceTemplate.Spec.Template.Labels) {

		workspace = workspace.DeepCopy()
		workspace.Spec = workspaceTemplate.Spec.Template.Spec
		workspace.Labels = workspaceTemplate.Spec.Template.Labels

		if err := r.Update(ctx, workspace); err != nil {
			logger.Error(err, "update workspace failed")
			return err
		}
	}
	return nil
}

func (c *controller) resetWorkspaceOwner(workspace *tenantv1alpha2.WorkspaceTemplate) error {
	workspace = workspace.DeepCopy()
	workspace.Spec.Template.Spec.Manager = ""
	_, err := c.ksClient.TenantV1alpha2().WorkspaceTemplates().Update(workspace)
	klog.V(4).Infof("update workspace after manager has been deleted")
	return err
func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	if err := r.ensureNotControlledByKubefed(ctx, logger, workspaceTemplate); err != nil {
		return err
	}
	federatedWorkspace := &typesv1beta1.FederatedWorkspace{}
	if err := r.Client.Get(ctx, types.NamespacedName{Name: workspaceTemplate.Name}, federatedWorkspace); err != nil {
		if errors.IsNotFound(err) {
			if federatedWorkspace, err := newFederatedWorkspace(workspaceTemplate); err != nil {
				logger.Error(err, "generate federated workspace failed")
				return err
			} else {
				if err := r.Create(ctx, federatedWorkspace); err != nil {
					logger.Error(err, "create federated workspace failed")
					return err
				}
			}
		}
		logger.Error(err, "get federated workspace failed")
		return err
	}

	if !reflect.DeepEqual(federatedWorkspace.Spec, workspaceTemplate.Spec) ||
		!reflect.DeepEqual(federatedWorkspace.Labels, workspaceTemplate.Labels) {

		federatedWorkspace.Spec = workspaceTemplate.Spec
		federatedWorkspace.Labels = workspaceTemplate.Labels

		if err := r.Update(ctx, federatedWorkspace); err != nil {
			logger.Error(err, "update federated workspace failed")
			return err
		}
	}

	return nil
}

func (c *controller) initManagerRoleBinding(workspace *tenantv1alpha2.WorkspaceTemplate) error {
func newFederatedWorkspace(template *tenantv1alpha2.WorkspaceTemplate) (*typesv1beta1.FederatedWorkspace, error) {
	federatedWorkspace := &typesv1beta1.FederatedWorkspace{
		TypeMeta: metav1.TypeMeta{
			Kind: typesv1beta1.FederatedWorkspaceRoleKind,
			APIVersion: typesv1beta1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: template.Name,
			Labels: template.Labels,
		},
		Spec: template.Spec,
	}
	if err := controllerutil.SetControllerReference(template, federatedWorkspace, scheme.Scheme); err != nil {
		return nil, err
	}
	return federatedWorkspace, nil
}

func newWorkspace(template *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha1.Workspace, error) {
	workspace := &tenantv1alpha1.Workspace{
		ObjectMeta: metav1.ObjectMeta{
			Name: template.Name,
			Labels: template.Spec.Template.Labels,
		},
		Spec: template.Spec.Template.Spec,
	}
	if err := controllerutil.SetControllerReference(template, workspace, scheme.Scheme); err != nil {
		return nil, err
	}
	return workspace, nil
}

func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger logr.Logger, workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error {
	if workspaceTemplate.Labels[constants.KubefedManagedLabel] != "false" {
		if workspaceTemplate.Labels == nil {
			workspaceTemplate.Labels = make(map[string]string)
		}
		workspaceTemplate = workspaceTemplate.DeepCopy()
		workspaceTemplate.Labels[constants.KubefedManagedLabel] = "false"
		logger.V(4).Info("update kubefed managed label")
		if err := r.Update(ctx, workspaceTemplate); err != nil {
			logger.Error(err, "update kubefed managed label failed")
			return err
		}
	}
	return nil
}

func (r *Reconciler) initWorkspaceRoles(ctx context.Context, logger logr.Logger, workspace *tenantv1alpha2.WorkspaceTemplate) error {
	var templates iamv1alpha2.RoleBaseList
	if err := r.List(ctx, &templates); err != nil {
		logger.Error(err, "list role base failed")
		return err
	}
	for _, template := range templates.Items {
		var expected iamv1alpha2.WorkspaceRole
		if err := yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(template.Role.Raw), 1024).Decode(&expected); err == nil && expected.Kind == iamv1alpha2.ResourceKindWorkspaceRole {
			expected.Name = fmt.Sprintf("%s-%s", workspace.Name, expected.Name)
			if expected.Labels == nil {
				expected.Labels = make(map[string]string)
			}
			expected.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
			var existed iamv1alpha2.WorkspaceRole
			if err := r.Get(ctx, types.NamespacedName{Name: expected.Name}, &existed); err != nil {
				if errors.IsNotFound(err) {
					logger.V(4).Info("create workspace role", "workspacerole", expected.Name)
					if err := r.Create(ctx, &expected); err != nil {
						logger.Error(err, "create workspace role failed")
						return err
					}
					continue
				} else {
					logger.Error(err, "get workspace role failed")
					return err
				}
			}
			if !reflect.DeepEqual(expected.Labels, existed.Labels) ||
				!reflect.DeepEqual(expected.Annotations, existed.Annotations) ||
				!reflect.DeepEqual(expected.Rules, existed.Rules) {
				updated := existed.DeepCopy()
				updated.Labels = expected.Labels
				updated.Annotations = expected.Annotations
				updated.Rules = expected.Rules
				logger.V(4).Info("update workspace role", "workspacerole", updated.Name)
				if err := r.Update(ctx, updated); err != nil {
					logger.Error(err, "update workspace role failed")
					return err
				}
			}
		} else if err != nil {
			logger.Error(fmt.Errorf("invalid role base found"), "init workspace roles failed", "name", template.Name)
		}
	}
	return nil
}

func (r *Reconciler) initManagerRoleBinding(ctx context.Context, logger logr.Logger, workspace *tenantv1alpha2.WorkspaceTemplate) error {
	manager := workspace.Spec.Template.Spec.Manager
	if manager == "" {
		return nil
	}

	user, err := c.ksClient.IamV1alpha2().Users().Get(manager, metav1.GetOptions{})
	if err != nil {
		// skip if user has been deleted
		if errors.IsNotFound(err) {
			return c.resetWorkspaceOwner(workspace)
		}
		klog.Error(err)
		return err
	var user iamv1alpha2.User
	if err := r.Get(ctx, types.NamespacedName{Name: manager}, &user); err != nil {
		return client.IgnoreNotFound(err)
	}

	// skip if user has been deleted
	if !user.DeletionTimestamp.IsZero() {
		return c.resetWorkspaceOwner(workspace)
		return nil
	}

	workspaceAdminRoleName := fmt.Sprintf(iamv1alpha2.WorkspaceAdminFormat, workspace.Name)
	workspaceAdminRoleName := fmt.Sprintf("%s-admin", workspace.Name)
	managerRoleBinding := &iamv1alpha2.WorkspaceRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%s", manager, workspaceAdminRoleName),
			Labels: map[string]string{
				tenantv1alpha1.WorkspaceLabel: workspace.Name,
				iamv1alpha2.UserReferenceLabel: manager,
			},
			Name: workspaceAdminRoleName,
		},
		RoleRef: rbacv1.RoleRef{
	}

	if _, err := ctrl.CreateOrUpdate(ctx, r.Client, managerRoleBinding, workspaceRoleBindingChanger(managerRoleBinding, workspace.Name, manager, workspaceAdminRoleName)); err != nil {
		logger.Error(err, "create workspace manager role binding failed")
		return err
	}

	return nil
}

func workspaceRoleBindingChanger(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding, workspace, username, workspaceRoleName string) controllerutil.MutateFn {
	return func() error {
		workspaceRoleBinding.Labels = map[string]string{
			tenantv1alpha1.WorkspaceLabel: workspace,
			iamv1alpha2.UserReferenceLabel: username,
		}

		workspaceRoleBinding.RoleRef = rbacv1.RoleRef{
			APIGroup: iamv1alpha2.SchemeGroupVersion.Group,
			Kind: iamv1alpha2.ResourceKindWorkspaceRole,
			Name: workspaceAdminRoleName,
		},
		Subjects: []rbacv1.Subject{
			Name: workspaceRoleName,
		}

		workspaceRoleBinding.Subjects = []rbacv1.Subject{
			{
				Name: manager,
				Kind: iamv1alpha2.ResourceKindUser,
				Name: username,
				Kind: rbacv1.UserKind,
				APIGroup: rbacv1.GroupName,
			},
		},
	}
	_, err = c.ksClient.IamV1alpha2().WorkspaceRoleBindings().Create(managerRoleBinding)
	if err != nil {
		if errors.IsAlreadyExists(err) {
			return nil
		}
		klog.Error(err)
		return err
		return nil
	}

	return nil
}

func (c *controller) updateFederatedWorkspace(workspace *typesv1beta1.FederatedWorkspace) error {
	_, err := c.ksClient.TypesV1beta1().FederatedWorkspaces().Update(workspace)
	if err != nil {
		klog.Error(err)
		return err
	}
	return nil
}
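workspaceRoleBindingChanger above returns a controllerutil.MutateFn: CreateOrUpdate first reads (or initializes) the binding, then runs the mutate function to re-apply the desired labels, RoleRef and Subjects, and finally issues a create or an update only when something changed. A minimal sketch of the same idiom, with a hypothetical binding name and a reduced mutate body, assuming the controller-runtime packages imported above:

// Illustrative only: the mutate closure must be idempotent, because it runs on both
// the create path (empty object) and the update path (object read from the cluster).
binding := &iamv1alpha2.WorkspaceRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "admin-demo-admin"}} // hypothetical name
if _, err := ctrl.CreateOrUpdate(ctx, r.Client, binding, func() error {
	binding.Labels = map[string]string{tenantv1alpha1.WorkspaceLabel: "demo"} // desired state, re-applied every reconcile
	return nil
}); err != nil {
	logger.Error(err, "reconcile role binding failed")
}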

@@ -0,0 +1,100 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workspacetemplate

import (
	"github.com/onsi/gomega/gexec"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/klog/klogr"
	"kubesphere.io/kubesphere/pkg/apis"
	"os"
	"path/filepath"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"testing"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment

func TestWorkspaceTemplateController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t,
		"WorkspaceTemplate Controller Test Suite",
		[]Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func(done Done) {
	logf.SetLogger(klogr.New())

	By("bootstrapping test environment")
	t := true
	if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
		testEnv = &envtest.Environment{
			UseExistingCluster: &t,
		}
	} else {
		testEnv = &envtest.Environment{
			CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
			AttachControlPlaneOutput: false,
		}
	}

	cfg, err := testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())

	err = apis.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme.Scheme,
		MetricsBindAddress: "0",
	})
	Expect(err).ToNot(HaveOccurred())

	err = (&Reconciler{}).SetupWithManager(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	go func() {
		err = k8sManager.Start(ctrl.SetupSignalHandler())
		Expect(err).ToNot(HaveOccurred())
	}()

	k8sClient = k8sManager.GetClient()
	Expect(k8sClient).ToNot(BeNil())

	close(done)
}, 60)

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
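As the BeforeSuite above shows, the suite either spins up a local envtest control plane from the CRDs under config/crds or, when the TEST_USE_EXISTING_CLUSTER environment variable is set to "true", runs against an already running cluster; in both cases the Reconciler under test is registered with a real controller-runtime manager rather than being invoked directly.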

@@ -0,0 +1,146 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workspacetemplate

import (
	"context"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"time"
)

var _ = Describe("WorkspaceTemplate", func() {

	const timeout = time.Second * 30
	const interval = time.Second * 1

	BeforeEach(func() {
		workspaceAdmin := newWorkspaceAdmin()

		err := k8sClient.Create(context.Background(), &workspaceAdmin)
		Expect(err).NotTo(HaveOccurred())

		admin := iamv1alpha2.User{ObjectMeta: metav1.ObjectMeta{Name: "admin"}}
		err = k8sClient.Create(context.Background(), &admin)
		Expect(err).NotTo(HaveOccurred())
	})

	// Add Tests for OpenAPI validation (or additional CRD features) specified in
	// your API definition.
	// Avoid adding tests for vanilla CRUD operations because they would
	// test Kubernetes API server, which isn't the goal here.
	Context("WorkspaceTemplate Controller", func() {
		It("Should create successfully", func() {
			key := types.NamespacedName{
				Name: "workspace-template",
			}

			created := &tenantv1alpha2.WorkspaceTemplate{
				ObjectMeta: metav1.ObjectMeta{
					Name: key.Name,
				},
			}

			// Create
			Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())

			By("Expecting to create workspace template successfully")
			Eventually(func() bool {
				f := &tenantv1alpha2.WorkspaceTemplate{}
				k8sClient.Get(context.Background(), key, f)
				return !f.CreationTimestamp.IsZero()
			}, timeout, interval).Should(BeTrue())

			By("Expecting to create workspace successfully")
			Eventually(func() bool {
				f := &tenantv1alpha1.Workspace{}
				k8sClient.Get(context.Background(), key, f)
				return !f.CreationTimestamp.IsZero()
			}, timeout, interval).Should(BeTrue())

			// List workspace roles
			By("Expecting to create workspace role successfully")
			Eventually(func() bool {
				f := &iamv1alpha2.WorkspaceRoleList{}
				k8sClient.List(context.Background(), f, &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{tenantv1alpha1.WorkspaceLabel: key.Name})})
				return len(f.Items) == 1
			}, timeout, interval).Should(BeTrue())

			// Update
			updated := &tenantv1alpha2.WorkspaceTemplate{}
			Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed())
			updated.Spec.Template.Spec.Manager = "admin"
			Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())

			// List workspace role bindings
			By("Expecting to create workspace manager role binding successfully")
			Eventually(func() bool {
				f := &iamv1alpha2.WorkspaceRoleBindingList{}
				k8sClient.List(context.Background(), f, &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{tenantv1alpha1.WorkspaceLabel: key.Name})})
				return len(f.Items) == 1
			}, timeout, interval).Should(BeTrue())

			// Delete
			By("Expecting to delete workspace successfully")
			Eventually(func() error {
				f := &tenantv1alpha2.WorkspaceTemplate{}
				k8sClient.Get(context.Background(), key, f)
				return k8sClient.Delete(context.Background(), f)
			}, timeout, interval).Should(Succeed())

			By("Expecting to delete workspace finish")
			Eventually(func() error {
				f := &tenantv1alpha2.WorkspaceTemplate{}
				return k8sClient.Get(context.Background(), key, f)
			}, timeout, interval).ShouldNot(Succeed())
		})
	})
})

func newWorkspaceAdmin() iamv1alpha2.RoleBase {
	return iamv1alpha2.RoleBase{
		ObjectMeta: metav1.ObjectMeta{Name: "workspace-admin"},
		Role: runtime.RawExtension{
			Raw: []byte(`{
"apiVersion": "iam.kubesphere.io/v1alpha2",
"kind": "WorkspaceRole",
"metadata": {
    "name": "admin"
},
"rules": [
    {
        "apiGroups": [
            "*"
        ],
        "resources": [
            "*"
        ],
        "verbs": [
            "*"
        ]
    }
]
}`)}}
}
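For reference, a minimal sketch of how the controller consumes this RoleBase, mirroring initWorkspaceRoles above; the bytes, fmt and yaml imports and the literal workspace name are assumptions for illustration and are not part of this test file:

// Sketch: decode the embedded WorkspaceRole, then prefix its name with the workspace
// name and attach the workspace label, as the reconciler does for every RoleBase.
var role iamv1alpha2.WorkspaceRole
if err := yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(newWorkspaceAdmin().Role.Raw), 1024).Decode(&role); err == nil {
	role.Name = fmt.Sprintf("%s-%s", "workspace-template", role.Name) // yields "workspace-template-admin"
	role.Labels = map[string]string{tenantv1alpha1.WorkspaceLabel: "workspace-template"}
}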