feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
KubeSphere CI Bot
2024-09-06 11:05:52 +08:00
committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions

View File

@@ -1,212 +1,270 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package workspacerole
import (
"context"
"reflect"
"fmt"
"strings"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
tenantv1alpha2 "kubesphere.io/api/tenant/v1alpha2"
typesv1beta1 "kubesphere.io/api/types/v1beta1"
rbachelper "kubesphere.io/kubesphere/pkg/componenthelper/auth/rbac"
"kubesphere.io/kubesphere/pkg/constants"
controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/controller/cluster/predicate"
clusterutils "kubesphere.io/kubesphere/pkg/controller/cluster/utils"
"kubesphere.io/kubesphere/pkg/controller/workspacetemplate/utils"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
)
const (
	// controllerName is the name the controller is registered under and is
	// also used for its logger and event recorder.
	// NOTE(review): the diff residue declared this constant twice
	// ("workspacerole-controller" and "workspacerole"); the value below is
	// the one the current SetupWithManager registers with.
	controllerName = "workspacerole"
	// finalizer blocks deletion of a WorkspaceRole until its copies in
	// member clusters have been removed (see deleteRelatedResources).
	finalizer = "finalizers.kubesphere.io/workspaceroles"
)
// Compile-time assertions that Reconciler satisfies both the KubeSphere
// controller interface and controller-runtime's reconcile.Reconciler.
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
// Name returns the controller's registration name.
func (r *Reconciler) Name() string {
	return controllerName
}
// Reconciler reconciles a WorkspaceRole object
//
// NOTE(review): this struct appears to carry fields from two revisions of the
// controller (exported Logger/Scheme/Recorder/MaxConcurrentReconciles vs.
// unexported logger/recorder/helper assigned in SetupWithManager) — confirm
// against upstream which set is current before building.
type Reconciler struct {
	client.Client
	// MultiClusterEnabled, Logger, Scheme, Recorder and
	// MaxConcurrentReconciles look like legacy configuration fields;
	// the code visible here does not assign them in SetupWithManager.
	MultiClusterEnabled bool
	Logger logr.Logger
	Scheme *runtime.Scheme
	Recorder record.EventRecorder
	MaxConcurrentReconciles int
	// logger and recorder are initialized in SetupWithManager.
	logger logr.Logger
	recorder record.EventRecorder
	// helper aggregates RBAC rules from role templates.
	helper *rbachelper.Helper
	// ClusterClient provides access to member clusters for multi-cluster sync.
	ClusterClient clusterclient.Interface
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Client == nil {
r.Client = mgr.GetClient()
}
if r.Logger.GetSink() == nil {
r.Logger = ctrl.Log.WithName("controllers").WithName(controllerName)
}
if r.Scheme == nil {
r.Scheme = mgr.GetScheme()
}
if r.Recorder == nil {
r.Recorder = mgr.GetEventRecorderFor(controllerName)
}
if r.MaxConcurrentReconciles <= 0 {
r.MaxConcurrentReconciles = 1
}
func (r *Reconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
// SetupWithManager wires the reconciler into the manager. It watches
// WorkspaceRole objects and additionally re-enqueues roles whenever a member
// cluster's status changes (see mapper).
//
// NOTE(review): the diff residue contained two WithOptions and two For calls
// (old iamv1alpha2 and new iamv1beta1); controller-runtime's builder only
// permits a single For, so only the new-revision chain is kept.
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
	r.ClusterClient = mgr.ClusterClient
	r.Client = mgr.GetClient()
	r.logger = ctrl.Log.WithName("controllers").WithName(controllerName)
	r.recorder = mgr.GetEventRecorderFor(controllerName)
	r.helper = rbachelper.NewHelper(r.Client)
	return ctrl.NewControllerManagedBy(mgr).
		Named(controllerName).
		WithOptions(controller.Options{MaxConcurrentReconciles: 2}).
		For(&iamv1beta1.WorkspaceRole{}).
		Watches(
			&clusterv1alpha1.Cluster{},
			handler.EnqueueRequestsFromMapFunc(r.mapper),
			builder.WithPredicates(predicate.ClusterStatusChangedPredicate{}),
		).
		Complete(r)
}
// mapper translates a Cluster event into reconcile requests for every
// WorkspaceRole whose owning WorkspaceTemplate targets that cluster.
// Clusters that are not yet ready produce no requests.
func (r *Reconciler) mapper(ctx context.Context, o client.Object) []reconcile.Request {
	cluster := o.(*clusterv1alpha1.Cluster)
	if !clusterutils.IsClusterReady(cluster) {
		return []reconcile.Request{}
	}

	roleList := &iamv1beta1.WorkspaceRoleList{}
	if err := r.List(ctx, roleList); err != nil {
		r.logger.Error(err, "failed to list workspace roles")
		return []reconcile.Request{}
	}

	var requests []reconcile.Request
	for i := range roleList.Items {
		role := roleList.Items[i]
		wsName := role.Labels[tenantv1beta1.WorkspaceLabel]
		tmpl := &tenantv1beta1.WorkspaceTemplate{}
		if err := r.Get(ctx, types.NamespacedName{Name: wsName}, tmpl); err != nil {
			// A missing template just means this role is skipped.
			klog.Warningf("failed to get workspace template %s: %s", wsName, err)
			continue
		}
		if !utils.WorkspaceTemplateMatchTargetCluster(tmpl, cluster) {
			continue
		}
		requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: role.Name}})
	}
	return requests
}
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspacerole", req.NamespacedName)
rootCtx := context.Background()
workspaceRole := &iamv1alpha2.WorkspaceRole{}
err := r.Get(rootCtx, req.NamespacedName, workspaceRole)
if err != nil {
logger := r.logger.WithValues("workspacerole", req.NamespacedName)
workspaceRole := &iamv1beta1.WorkspaceRole{}
if err := r.Get(ctx, req.NamespacedName, workspaceRole); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// controlled kubefed-controller-manager
if workspaceRole.Labels[constants.KubefedManagedLabel] == "true" {
if workspaceRole.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object.
if !controllerutil.ContainsFinalizer(workspaceRole, finalizer) {
expected := workspaceRole.DeepCopy()
controllerutil.AddFinalizer(expected, finalizer)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(workspaceRole))
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(workspaceRole, finalizer) {
if err := r.deleteRelatedResources(ctx, workspaceRole); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete related resources: %s", err)
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(workspaceRole, finalizer)
if err := r.Update(ctx, workspaceRole, &client.UpdateOptions{}); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
if err := r.bindWorkspace(rootCtx, logger, workspaceRole); err != nil {
if err := r.bindWorkspace(ctx, logger, workspaceRole); err != nil {
return ctrl.Result{}, err
}
if r.MultiClusterEnabled {
if err = r.multiClusterSync(rootCtx, logger, workspaceRole); err != nil {
if workspaceRole.AggregationRoleTemplates != nil {
if err := r.helper.AggregationRole(ctx, rbachelper.WorkspaceRoleRuleOwner{WorkspaceRole: workspaceRole}, r.recorder); err != nil {
return ctrl.Result{}, err
}
}
r.Recorder.Event(workspaceRole, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
if err := r.multiClusterSync(ctx, workspaceRole); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
func (r *Reconciler) deleteRelatedResources(ctx context.Context, workspaceRole *iamv1beta1.WorkspaceRole) error {
clusters, err := r.ClusterClient.ListClusters(ctx)
if err != nil {
return fmt.Errorf("failed to list clusters: %s", err)
}
var notReadyClusters []string
for _, cluster := range clusters {
if clusterutils.IsHostCluster(&cluster) {
continue
}
// skip if cluster is not ready
if !clusterutils.IsClusterReady(&cluster) {
notReadyClusters = append(notReadyClusters, cluster.Name)
continue
}
clusterClient, err := r.ClusterClient.GetRuntimeClient(cluster.Name)
if err != nil {
return fmt.Errorf("failed to get cluster client: %s", err)
}
if err = clusterClient.Delete(ctx, &iamv1beta1.WorkspaceRole{ObjectMeta: metav1.ObjectMeta{Name: workspaceRole.Name}}); err != nil {
if errors.IsNotFound(err) {
continue
}
return err
}
}
if len(notReadyClusters) > 0 {
err = fmt.Errorf("cluster not ready: %s", strings.Join(notReadyClusters, ","))
klog.FromContext(ctx).Error(err, "failed to delete related resources")
r.recorder.Event(workspaceRole, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", strings.Join(notReadyClusters, ",")))
return err
}
return nil
}
func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, workspaceRole *iamv1beta1.WorkspaceRole) error {
workspaceName := workspaceRole.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
return nil
}
var workspace tenantv1alpha2.WorkspaceTemplate
if err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, &workspace); err != nil {
var workspace tenantv1beta1.WorkspaceTemplate
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, &workspace); err != nil {
return client.IgnoreNotFound(err)
}
if !metav1.IsControlledBy(workspaceRole, &workspace) {
workspaceRole.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRole.OwnerReferences)
if err := controllerutil.SetControllerReference(&workspace, workspaceRole, r.Scheme()); err != nil {
return err
}
return r.Update(ctx, workspaceRole)
}
return nil
})
if err != nil {
return fmt.Errorf("failed to update workspace role %s: %s", workspaceRole.Name, err)
}
return nil
}
// multiClusterSync pushes the WorkspaceRole to every ready member cluster
// (the host cluster is excluded). Clusters that are not ready are collected
// and surfaced as a warning event, but do not fail the reconcile.
func (r *Reconciler) multiClusterSync(ctx context.Context, workspaceRole *iamv1beta1.WorkspaceRole) error {
	clusters, err := r.ClusterClient.ListClusters(ctx)
	if err != nil {
		return fmt.Errorf("failed to list clusters: %s", err)
	}

	var unready []string
	for i := range clusters {
		cluster := clusters[i]
		if !clusterutils.IsClusterReady(&cluster) {
			unready = append(unready, cluster.Name)
			continue
		}
		if clusterutils.IsHostCluster(&cluster) {
			continue
		}
		if err := r.syncWorkspaceRole(ctx, cluster, workspaceRole); err != nil {
			return fmt.Errorf("failed to sync workspace role %s to cluster %s: %s", workspaceRole.Name, cluster.Name, err)
		}
	}

	if len(unready) > 0 {
		joined := strings.Join(unready, ",")
		klog.FromContext(ctx).V(4).Info("cluster not ready", "clusters", joined)
		r.recorder.Event(workspaceRole, corev1.EventTypeWarning, kscontroller.SyncFailed, fmt.Sprintf("cluster not ready: %s", joined))
	}
	return nil
}
func (r *Reconciler) syncWorkspaceRole(ctx context.Context, cluster clusterv1alpha1.Cluster, workspaceRole *iamv1beta1.WorkspaceRole) error {
clusterClient, err := r.ClusterClient.GetRuntimeClient(cluster.Name)
if err != nil {
return fmt.Errorf("failed to get cluster client: %s", err)
}
workspaceTemplate := &tenantv1beta1.WorkspaceTemplate{}
if err := r.Get(ctx, types.NamespacedName{Name: workspaceRole.Labels[tenantv1beta1.WorkspaceLabel]}, workspaceTemplate); err != nil {
return client.IgnoreNotFound(err)
}
if !metav1.IsControlledBy(workspaceRole, &workspace) {
workspaceRole.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRole.OwnerReferences)
if err := controllerutil.SetControllerReference(&workspace, workspaceRole, r.Scheme); err != nil {
logger.Error(err, "set controller reference failed")
return err
}
if err := r.Update(ctx, workspaceRole); err != nil {
logger.Error(err, "update workspace role failed")
return err
}
}
return nil
}
func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
if err := r.ensureNotControlledByKubefed(ctx, logger, workspaceRole); err != nil {
return err
}
federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRole{}
if err := r.Client.Get(ctx, types.NamespacedName{Name: workspaceRole.Name}, federatedWorkspaceRole); err != nil {
if errors.IsNotFound(err) {
if federatedWorkspaceRole, err := newFederatedWorkspaceRole(workspaceRole); err != nil {
logger.Error(err, "create federated workspace role failed")
return err
} else {
if err := r.Create(ctx, federatedWorkspaceRole); err != nil {
logger.Error(err, "create federated workspace role failed")
return err
}
return nil
}
}
logger.Error(err, "get federated workspace role failed")
return err
}
if !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Rules, workspaceRole.Rules) ||
!reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Labels, workspaceRole.Labels) {
federatedWorkspaceRole.Spec.Template.Rules = workspaceRole.Rules
federatedWorkspaceRole.Spec.Template.Labels = workspaceRole.Labels
if err := r.Update(ctx, federatedWorkspaceRole); err != nil {
logger.Error(err, "update federated workspace role failed")
return err
}
}
return nil
}
func newFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) (*typesv1beta1.FederatedWorkspaceRole, error) {
federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRole{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceRole.Name,
},
Spec: typesv1beta1.FederatedWorkspaceRoleSpec{
Template: typesv1beta1.WorkspaceRoleTemplate{
ObjectMeta: metav1.ObjectMeta{
Labels: workspaceRole.Labels,
},
Rules: workspaceRole.Rules,
},
Placement: typesv1beta1.GenericPlacementFields{
ClusterSelector: &metav1.LabelSelector{},
},
},
}
if err := controllerutil.SetControllerReference(workspaceRole, federatedWorkspaceRole, scheme.Scheme); err != nil {
return nil, err
}
return federatedWorkspaceRole, nil
}
func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger logr.Logger, workspaceRole *iamv1alpha2.WorkspaceRole) error {
if workspaceRole.Labels[constants.KubefedManagedLabel] != "false" {
if workspaceRole.Labels == nil {
workspaceRole.Labels = make(map[string]string)
}
workspaceRole.Labels[constants.KubefedManagedLabel] = "false"
if err := r.Update(ctx, workspaceRole); err != nil {
logger.Error(err, "update kubefed managed label failed")
if utils.WorkspaceTemplateMatchTargetCluster(workspaceTemplate, &cluster) {
target := &iamv1beta1.WorkspaceRole{ObjectMeta: metav1.ObjectMeta{Name: workspaceRole.Name}}
op, err := controllerutil.CreateOrUpdate(ctx, clusterClient, target, func() error {
target.Labels = workspaceRole.Labels
target.Annotations = workspaceRole.Annotations
target.Rules = workspaceRole.Rules
target.AggregationRoleTemplates = workspaceRole.AggregationRoleTemplates
return nil
})
if err != nil {
return err
}
klog.FromContext(ctx).V(4).Info("workspace role successfully synced", "cluster", cluster.Name, "operation", op, "name", workspaceRole.Name)
} else {
return client.IgnoreNotFound(clusterClient.DeleteAllOf(ctx, &iamv1beta1.WorkspaceRole{}, client.MatchingLabels{tenantv1beta1.WorkspaceLabel: workspaceTemplate.Name}))
}
return nil
}

View File

@@ -1,39 +1,34 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package workspacerole
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"github.com/onsi/gomega/gexec"
"k8s.io/client-go/kubernetes/scheme"
"kubesphere.io/kubesphere/pkg/controller/controllertest"
"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/klog/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"kubesphere.io/kubesphere/pkg/multicluster"
"kubesphere.io/kubesphere/pkg/scheme"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
@@ -42,13 +37,15 @@ import (
// Suite-level handles shared across the specs; initialized in BeforeSuite.
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
// TestWorkspaceRoleController is the Go test entry point that runs the
// Ginkgo suite.
func TestWorkspaceRoleController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "WorkspaceRole Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
var _ = BeforeSuite(func() {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
@@ -58,8 +55,10 @@ var _ = BeforeSuite(func(done Done) {
UseExistingCluster: &t,
}
} else {
crdDirPaths, err := controllertest.LoadCrdPath()
Expect(err).ToNot(HaveOccurred())
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "ks-core", "crds")},
CRDDirectoryPaths: crdDirPaths,
AttachControlPlaneOutput: false,
}
}
@@ -68,32 +67,37 @@ var _ = BeforeSuite(func(done Done) {
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
MetricsBindAddress: "0",
Scheme: scheme.Scheme,
Metrics: metricsserver.Options{
BindAddress: "0",
},
})
Expect(err).ToNot(HaveOccurred())
err = (&Reconciler{}).SetupWithManager(k8sManager)
clusterClient, err := clusterclient.NewClusterClientSet(k8sManager.GetCache())
Expect(err).ToNot(HaveOccurred())
err = (&Reconciler{}).SetupWithManager(&controller.Manager{
Options: kscontroller.Options{MultiClusterOptions: &multicluster.Options{ClusterRole: string(clusterv1alpha1.ClusterRoleHost)}},
ClusterClient: clusterClient, Manager: k8sManager})
Expect(err).ToNot(HaveOccurred())
ctx, cancel = context.WithCancel(context.Background())
go func() {
err = k8sManager.Start(ctrl.SetupSignalHandler())
err = k8sManager.Start(ctx)
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
})
// AfterSuite cancels the manager's context and tears down the envtest
// control plane. Stop is retried via Eventually because the apiserver/etcd
// processes can take a while to exit.
//
// NOTE(review): the diff residue retained both the old direct
// testEnv.Stop()+Expect and the new Eventually form (a double Stop); only
// the retried form is kept.
var _ = AfterSuite(func() {
	cancel()
	By("tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	Eventually(func() error {
		return testEnv.Stop()
	}, 30*time.Second, 5*time.Second).ShouldNot(HaveOccurred())
})

View File

@@ -1,18 +1,7 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package workspacerole
@@ -21,15 +10,14 @@ import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
tenantv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
tenantv1alpha2 "kubesphere.io/api/tenant/v1alpha2"
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
)
var _ = Describe("WorkspaceRole", func() {
@@ -37,7 +25,7 @@ var _ = Describe("WorkspaceRole", func() {
const timeout = time.Second * 30
const interval = time.Second * 1
workspace := &tenantv1alpha2.WorkspaceTemplate{
workspace := &tenantv1beta1.WorkspaceTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "workspace1",
},
@@ -53,10 +41,10 @@ var _ = Describe("WorkspaceRole", func() {
// test Kubernetes API server, which isn't the goal here.
Context("WorkspaceRole Controller", func() {
It("Should create successfully", func() {
workspaceAdmin := &iamv1alpha2.WorkspaceRole{
workspaceAdmin := &iamv1beta1.WorkspaceRole{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-admin", workspace.Name),
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
Labels: map[string]string{tenantv1beta1.WorkspaceLabel: workspace.Name},
},
Rules: []rbacv1.PolicyRule{},
}