diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index 5b45cd98d..9eeabdea7 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -18,7 +18,11 @@ package app import ( "fmt" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" "kubesphere.io/kubesphere/pkg/controller/application" "kubesphere.io/kubesphere/pkg/controller/certificatesigningrequest" "kubesphere.io/kubesphere/pkg/controller/cluster" @@ -26,6 +30,7 @@ import ( "kubesphere.io/kubesphere/pkg/controller/destinationrule" "kubesphere.io/kubesphere/pkg/controller/devopscredential" "kubesphere.io/kubesphere/pkg/controller/devopsproject" + "kubesphere.io/kubesphere/pkg/controller/globalrole" "kubesphere.io/kubesphere/pkg/controller/globalrolebinding" "kubesphere.io/kubesphere/pkg/controller/job" "kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy" @@ -37,11 +42,15 @@ import ( "kubesphere.io/kubesphere/pkg/controller/storage/expansion" "kubesphere.io/kubesphere/pkg/controller/user" "kubesphere.io/kubesphere/pkg/controller/virtualservice" + "kubesphere.io/kubesphere/pkg/controller/workspacerole" + "kubesphere.io/kubesphere/pkg/controller/workspacerolebinding" + "kubesphere.io/kubesphere/pkg/controller/workspacetemplate" "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/simple/client/devops" "kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/s3" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/kubefed/pkg/controller/util" ) func AddControllers( @@ -133,16 +142,86 @@ func AddControllers( kubernetesInformer.Apps().V1().ReplicaSets(), kubernetesInformer.Apps().V1().StatefulSets()) - userController := user.NewController(client.Kubernetes(), client.KubeSphere(), 
client.Config(), - kubesphereInformer.Iam().V1alpha2().Users()) + var fedUserCache, fedGlobalRoleBindingCache, fedGlobalRoleCache, + fedWorkspaceCache, fedWorkspaceRoleCache, fedWorkspaceRoleBindingCache cache.Store + var fedUserCacheController, fedGlobalRoleBindingCacheController, fedGlobalRoleCacheController, + fedWorkspaceCacheController, fedWorkspaceRoleCacheController, fedWorkspaceRoleBindingCacheController cache.Controller - csrController := certificatesigningrequest.NewController(client.Kubernetes(), kubernetesInformer, client.Config()) + if multiClusterEnabled { + + fedUserClient, err := util.NewResourceClient(client.Config(), &iamv1alpha2.FedUserResource) + if err != nil { + klog.Error(err) + return err + } + fedGlobalRoleClient, err := util.NewResourceClient(client.Config(), &iamv1alpha2.FedGlobalRoleResource) + if err != nil { + klog.Error(err) + return err + } + fedGlobalRoleBindingClient, err := util.NewResourceClient(client.Config(), &iamv1alpha2.FedGlobalRoleBindingResource) + if err != nil { + klog.Error(err) + return err + } + fedWorkspaceClient, err := util.NewResourceClient(client.Config(), &tenantv1alpha2.FedWorkspaceResource) + if err != nil { + klog.Error(err) + return err + } + fedWorkspaceRoleClient, err := util.NewResourceClient(client.Config(), &iamv1alpha2.FedWorkspaceRoleResource) + if err != nil { + klog.Error(err) + return err + } + fedWorkspaceRoleBindingClient, err := util.NewResourceClient(client.Config(), &iamv1alpha2.FedWorkspaceRoleBindingResource) + if err != nil { + klog.Error(err) + return err + } + + fedUserCache, fedUserCacheController = util.NewResourceInformer(fedUserClient, "", &iamv1alpha2.FedUserResource, func(object runtime.Object) {}) + fedGlobalRoleCache, fedGlobalRoleCacheController = util.NewResourceInformer(fedGlobalRoleClient, "", &iamv1alpha2.FedGlobalRoleResource, func(object runtime.Object) {}) + fedGlobalRoleBindingCache, fedGlobalRoleBindingCacheController = 
util.NewResourceInformer(fedGlobalRoleBindingClient, "", &iamv1alpha2.FedGlobalRoleBindingResource, func(object runtime.Object) {}) + fedWorkspaceCache, fedWorkspaceCacheController = util.NewResourceInformer(fedWorkspaceClient, "", &tenantv1alpha2.FedWorkspaceResource, func(object runtime.Object) {}) + fedWorkspaceRoleCache, fedWorkspaceRoleCacheController = util.NewResourceInformer(fedWorkspaceRoleClient, "", &iamv1alpha2.FedWorkspaceRoleResource, func(object runtime.Object) {}) + fedWorkspaceRoleBindingCache, fedWorkspaceRoleBindingCacheController = util.NewResourceInformer(fedWorkspaceRoleBindingClient, "", &iamv1alpha2.FedWorkspaceRoleBindingResource, func(object runtime.Object) {}) + + go fedUserCacheController.Run(stopCh) + go fedGlobalRoleCacheController.Run(stopCh) + go fedGlobalRoleBindingCacheController.Run(stopCh) + go fedWorkspaceCacheController.Run(stopCh) + go fedWorkspaceRoleCacheController.Run(stopCh) + go fedWorkspaceRoleBindingCacheController.Run(stopCh) + } + + userController := user.NewController(client.Kubernetes(), client.KubeSphere(), client.Config(), + kubesphereInformer.Iam().V1alpha2().Users(), + fedUserCache, fedUserCacheController, kubernetesInformer.Core().V1().ConfigMaps(), multiClusterEnabled) + + csrController := certificatesigningrequest.NewController(client.Kubernetes(), kubernetesInformer.Certificates().V1beta1().CertificateSigningRequests(), + kubernetesInformer.Core().V1().ConfigMaps(), client.Config()) clusterRoleBindingController := clusterrolebinding.NewController(client.Kubernetes(), kubernetesInformer.Rbac().V1().ClusterRoleBindings(), kubernetesInformer.Apps().V1().Deployments(), kubernetesInformer.Core().V1().Pods(), kubesphereInformer.Iam().V1alpha2().Users()) - globalRoleBindingController := globalrolebinding.NewController(client.Kubernetes(), kubesphereInformer.Iam().V1alpha2().GlobalRoleBindings(), multiClusterEnabled) + globalRoleController := globalrole.NewController(client.Kubernetes(), client.KubeSphere(), + 
kubesphereInformer.Iam().V1alpha2().GlobalRoles(), fedGlobalRoleCache, fedGlobalRoleCacheController) + + workspaceRoleController := workspacerole.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Iam().V1alpha2().WorkspaceRoles(), fedWorkspaceRoleCache, fedWorkspaceRoleCacheController) + + globalRoleBindingController := globalrolebinding.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Iam().V1alpha2().GlobalRoleBindings(), fedGlobalRoleBindingCache, fedGlobalRoleBindingCacheController, multiClusterEnabled) + + workspaceRoleBindingController := workspacerolebinding.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Iam().V1alpha2().WorkspaceRoleBindings(), fedWorkspaceRoleBindingCache, fedWorkspaceRoleBindingCacheController) + + workspaceTemplateController := workspacetemplate.NewController(client.Kubernetes(), client.KubeSphere(), + kubesphereInformer.Tenant().V1alpha2().WorkspaceTemplates(), kubesphereInformer.Tenant().V1alpha1().Workspaces(), + kubesphereInformer.Iam().V1alpha2().RoleBases(), kubesphereInformer.Iam().V1alpha2().WorkspaceRoles(), + fedWorkspaceCache, fedWorkspaceCacheController, multiClusterEnabled) clusterController := cluster.NewClusterController( client.Kubernetes(), @@ -169,21 +248,31 @@ func AddControllers( "s2ibinary-controller": s2iBinaryController, "s2irun-controller": s2iRunController, "volumeexpansion-controller": volumeExpansionController, - "devopsprojects-controller": devopsProjectController, - "pipeline-controller": devopsPipelineController, - "devopscredential-controller": devopsCredentialController, "user-controller": userController, "cluster-controller": clusterController, "nsnp-controller": nsnpController, "csr-controller": csrController, "clusterrolebinding-controller": clusterRoleBindingController, "globalrolebinding-controller": globalRoleBindingController, + "workspacetemplate-controller": workspaceTemplateController, + } + + if devopsClient != 
nil { + controllers["pipeline-controller"] = devopsPipelineController + controllers["devopsprojects-controller"] = devopsProjectController + controllers["devopscredential-controller"] = devopsCredentialController } if storageCapabilityController.IsValidKubernetesVersion() { controllers["storagecapability-controller"] = storageCapabilityController } + if multiClusterEnabled { + controllers["globalrole-controller"] = globalRoleController + controllers["workspacerole-controller"] = workspaceRoleController + controllers["workspacerolebinding-controller"] = workspaceRoleBindingController + } + for name, ctrl := range controllers { if err := mgr.Add(ctrl); err != nil { klog.Error(err, "add controller to manager failed", "name", name) diff --git a/config/crds/iam.kubesphere.io_federatedclusterrolebindings.yaml b/config/crds/iam.kubesphere.io_federatedclusterrolebindings.yaml deleted file mode 100644 index 8e08f25c3..000000000 --- a/config/crds/iam.kubesphere.io_federatedclusterrolebindings.yaml +++ /dev/null @@ -1,125 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: (devel) - creationTimestamp: null - name: federatedclusterrolebindings.iam.kubesphere.io -spec: - group: iam.kubesphere.io - names: - kind: FederatedClusterRoleBinding - listKind: FederatedClusterRoleBindingList - plural: federatedclusterrolebindings - singular: federatedclusterrolebinding - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - placement: - properties: - clusterSelector: - properties: - matchLabels: - additionalProperties: - type: string - type: object - type: object - clusters: - items: - properties: - name: - type: string - required: - - name - type: object - type: array - type: object - template: - properties: - roleRef: - description: RoleRef contains information that points to the role - being used - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - apiGroup - - kind - - name - type: object - subjects: - items: - description: Subject contains a reference to the object or user - identities a role binding applies to. This can either hold - a direct API object reference, or a value for non-objects such - as user and group names. - properties: - apiGroup: - description: APIGroup holds the API group of the referenced - subject. Defaults to "" for ServiceAccount subjects. Defaults - to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: Kind of object being referenced. Values defined - by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the - Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: Namespace of the referenced object. If the object - kind is non-namespace, such as "User" or "Group", and this - value is not empty the Authorizer should report an error. 
- type: string - required: - - kind - - name - type: object - type: array - required: - - roleRef - type: object - required: - - placement - - template - type: object - required: - - spec - type: object - version: v1alpha2 - versions: - - name: v1alpha2 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crds/iam.kubesphere.io_rolebases.yaml b/config/crds/iam.kubesphere.io_rolebases.yaml new file mode 100644 index 000000000..4747ea70d --- /dev/null +++ b/config/crds/iam.kubesphere.io_rolebases.yaml @@ -0,0 +1,50 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: rolebases.iam.kubesphere.io +spec: + group: iam.kubesphere.io + names: + categories: + - iam + kind: RoleBase + listKind: RoleBaseList + plural: rolebases + singular: rolebase + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + role: + type: object + required: + - role + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/tenant.kubesphere.io_workspacetemplates.yaml b/config/crds/tenant.kubesphere.io_workspacetemplates.yaml index 35a79e576..71aa548eb 100644 --- a/config/crds/tenant.kubesphere.io_workspacetemplates.yaml +++ b/config/crds/tenant.kubesphere.io_workspacetemplates.yaml @@ -44,6 +44,35 @@ spec: type: string networkIsolation: type: boolean + overrides: + items: + properties: + clusterName: + type: string + clusterOverrides: + items: + properties: + op: + type: string + path: + type: string + value: + anyOf: + - type: string + - type: integer + - type: boolean + - type: object + - type: array + required: + - path + - value + type: object + type: array + required: + - clusterName + - clusterOverrides + type: object + type: array type: object type: object version: v1alpha2 diff --git a/pkg/apis/iam/v1alpha2/federated_types.go b/pkg/apis/iam/v1alpha2/federated_types.go new file mode 100644 index 000000000..27da6632e --- /dev/null +++ b/pkg/apis/iam/v1alpha2/federated_types.go @@ -0,0 +1,161 @@ +/* + + Copyright 2020 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package v1alpha2 + +import ( + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + ResourcesSingularFedUser = "federateduser" + ResourcesSingularFedGlobalRoleBinding = "federatedglobalrolebinding" + ResourcesSingularFedWorkspaceRoleBinding = "federatedworkspacerolebinding" + ResourcesSingularFedGlobalRole = "federatedglobalrole" + ResourcesSingularFedWorkspaceRole = "federatedworkspacerole" + ResourcesPluralFedUser = "federatedusers" + ResourcesPluralFedGlobalRoleBinding = "federatedglobalrolebindings" + ResourcesPluralFedWorkspaceRoleBinding = "federatedworkspacerolebindings" + ResourcesPluralFedGlobalRole = "federatedglobalroles" + ResourcesPluralFedWorkspaceRole = "federatedworkspaceroles" + FedClusterRoleBindingKind = "FederatedClusterRoleBinding" + FedClusterRoleKind = "FederatedClusterRole" + FedGlobalRoleKind = "FederatedGlobalRole" + FedWorkspaceRoleKind = "FederatedWorkspaceRole" + FedGlobalRoleBindingKind = "FederatedGlobalRoleBinding" + FedWorkspaceRoleBindingKind = "FederatedWorkspaceRoleBinding" + fedResourceGroup = "types.kubefed.io" + fedResourceVersion = "v1beta1" + FedUserKind = "FederatedUser" +) + +var ( + FedUserResource = metav1.APIResource{ + Name: ResourcesPluralFedUser, + SingularName: ResourcesSingularFedUser, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedUserKind, + } + FedGlobalRoleBindingResource = metav1.APIResource{ + Name: ResourcesPluralFedGlobalRoleBinding, + SingularName: ResourcesSingularFedGlobalRoleBinding, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedGlobalRoleBindingKind, + } + FedWorkspaceRoleBindingResource = metav1.APIResource{ + Name: ResourcesPluralFedWorkspaceRoleBinding, + SingularName: ResourcesSingularFedWorkspaceRoleBinding, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedWorkspaceRoleBindingKind, 
+ } + FedGlobalRoleResource = metav1.APIResource{ + Name: ResourcesPluralFedGlobalRole, + SingularName: ResourcesSingularFedGlobalRole, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedGlobalRoleKind, + } + + FedWorkspaceRoleResource = metav1.APIResource{ + Name: ResourcesPluralFedWorkspaceRole, + SingularName: ResourcesSingularFedWorkspaceRole, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedWorkspaceRoleKind, + } + + FederatedClusterRoleBindingResource = schema.GroupVersionResource{ + Group: fedResourceGroup, + Version: fedResourceVersion, + Resource: "federatedclusterrolebindings", + } +) + +type FederatedRoleBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FederatedRoleBindingSpec `json:"spec"` +} + +type FederatedRoleBindingSpec struct { + Template RoleBindingTemplate `json:"template"` + Placement Placement `json:"placement"` +} +type RoleBindingTemplate struct { + metav1.ObjectMeta `json:"metadata,omitempty"` + Subjects []rbacv1.Subject `json:"subjects,omitempty"` + RoleRef rbacv1.RoleRef `json:"roleRef"` +} + +type FederatedRole struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FederatedRoleSpec `json:"spec"` +} + +type FederatedRoleSpec struct { + Template RoleTemplate `json:"template"` + Placement Placement `json:"placement"` +} + +type RoleTemplate struct { + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Rules []rbacv1.PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +type FederatedUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FederatedUserSpec `json:"spec"` +} + +type FederatedUserSpec struct { + Template UserTemplate `json:"template"` + Placement Placement `json:"placement"` +} + +type UserTemplate struct { + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec UserSpec 
`json:"spec"` + // +optional + Status UserStatus `json:"status,omitempty"` +} + +type Placement struct { + Clusters []Cluster `json:"clusters,omitempty"` + ClusterSelector ClusterSelector `json:"clusterSelector,omitempty"` +} + +type ClusterSelector struct { + MatchLabels map[string]string `json:"matchLabels,omitempty"` +} + +type Cluster struct { + Name string `json:"name"` +} diff --git a/pkg/apis/iam/v1alpha2/register.go b/pkg/apis/iam/v1alpha2/register.go index 0a664e463..203799b24 100644 --- a/pkg/apis/iam/v1alpha2/register.go +++ b/pkg/apis/iam/v1alpha2/register.go @@ -59,7 +59,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &WorkspaceRoleList{}, &WorkspaceRoleBinding{}, &WorkspaceRoleBindingList{}, - &FederatedClusterRoleBinding{}, + &RoleBase{}, + &RoleBaseList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/iam/v1alpha2/types.go b/pkg/apis/iam/v1alpha2/types.go index 86d99cd8d..137596bd9 100644 --- a/pkg/apis/iam/v1alpha2/types.go +++ b/pkg/apis/iam/v1alpha2/types.go @@ -19,6 +19,7 @@ package v1alpha2 import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) const ( @@ -65,6 +66,7 @@ const ( ScopeCluster = "cluster" ScopeNamespace = "namespace" PlatformAdmin = "platform-admin" + NamespaceAdmin = "admin" ClusterAdmin = "cluster-admin" ) @@ -284,31 +286,22 @@ type WorkspaceRoleBindingList struct { Items []WorkspaceRoleBinding `json:"items"` } +// +genclient +// +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type FederatedClusterRoleBinding struct { +// +kubebuilder:resource:categories="iam",scope="Cluster" +type RoleBase struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec FederatedClusterRoleBindingSpec `json:"spec"` + + Role runtime.RawExtension `json:"role"` } -type FederatedClusterRoleBindingSpec struct { - Template Template `json:"template"` - 
Placement Placement `json:"placement"` -} -type Template struct { - Subjects []rbacv1.Subject `json:"subjects,omitempty"` - RoleRef rbacv1.RoleRef `json:"roleRef"` -} +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Placement struct { - Clusters []Cluster `json:"clusters,omitempty"` - ClusterSelector ClusterSelector `json:"clusterSelector,omitempty"` -} - -type ClusterSelector struct { - MatchLabels map[string]string `json:"matchLabels,omitempty"` -} - -type Cluster struct { - Name string `json:"name"` +// RoleBaseList contains a list of RoleBase +type RoleBaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RoleBase `json:"items"` } diff --git a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go index 0a2a65d29..c95916cd4 100644 --- a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go @@ -63,44 +63,106 @@ func (in *ClusterSelector) DeepCopy() *ClusterSelector { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedClusterRoleBinding) DeepCopyInto(out *FederatedClusterRoleBinding) { +func (in *FederatedRole) DeepCopyInto(out *FederatedRole) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedClusterRoleBinding. -func (in *FederatedClusterRoleBinding) DeepCopy() *FederatedClusterRoleBinding { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedRole. 
+func (in *FederatedRole) DeepCopy() *FederatedRole { if in == nil { return nil } - out := new(FederatedClusterRoleBinding) + out := new(FederatedRole) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FederatedClusterRoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedRoleBinding) DeepCopyInto(out *FederatedRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedRoleBinding. +func (in *FederatedRoleBinding) DeepCopy() *FederatedRoleBinding { + if in == nil { + return nil } - return nil + out := new(FederatedRoleBinding) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedClusterRoleBindingSpec) DeepCopyInto(out *FederatedClusterRoleBindingSpec) { +func (in *FederatedRoleBindingSpec) DeepCopyInto(out *FederatedRoleBindingSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) in.Placement.DeepCopyInto(&out.Placement) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedClusterRoleBindingSpec. -func (in *FederatedClusterRoleBindingSpec) DeepCopy() *FederatedClusterRoleBindingSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedRoleBindingSpec. 
+func (in *FederatedRoleBindingSpec) DeepCopy() *FederatedRoleBindingSpec { if in == nil { return nil } - out := new(FederatedClusterRoleBindingSpec) + out := new(FederatedRoleBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedRoleSpec) DeepCopyInto(out *FederatedRoleSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + in.Placement.DeepCopyInto(&out.Placement) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedRoleSpec. +func (in *FederatedRoleSpec) DeepCopy() *FederatedRoleSpec { + if in == nil { + return nil + } + out := new(FederatedRoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedUser) DeepCopyInto(out *FederatedUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedUser. +func (in *FederatedUser) DeepCopy() *FederatedUser { + if in == nil { + return nil + } + out := new(FederatedUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedUserSpec) DeepCopyInto(out *FederatedUserSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + in.Placement.DeepCopyInto(&out.Placement) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedUserSpec. 
+func (in *FederatedUserSpec) DeepCopy() *FederatedUserSpec { + if in == nil { + return nil + } + out := new(FederatedUserSpec) in.DeepCopyInto(out) return out } @@ -254,8 +316,67 @@ func (in *Placement) DeepCopy() *Placement { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Template) DeepCopyInto(out *Template) { +func (in *RoleBase) DeepCopyInto(out *RoleBase) { *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Role.DeepCopyInto(&out.Role) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBase. +func (in *RoleBase) DeepCopy() *RoleBase { + if in == nil { + return nil + } + out := new(RoleBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBaseList) DeepCopyInto(out *RoleBaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBaseList. +func (in *RoleBaseList) DeepCopy() *RoleBaseList { + if in == nil { + return nil + } + out := new(RoleBaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RoleBaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingTemplate) DeepCopyInto(out *RoleBindingTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Subjects != nil { in, out := &in.Subjects, &out.Subjects *out = make([]v1.Subject, len(*in)) @@ -264,12 +385,35 @@ func (in *Template) DeepCopyInto(out *Template) { out.RoleRef = in.RoleRef } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. -func (in *Template) DeepCopy() *Template { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingTemplate. +func (in *RoleBindingTemplate) DeepCopy() *RoleBindingTemplate { if in == nil { return nil } - out := new(Template) + out := new(RoleBindingTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleTemplate) DeepCopyInto(out *RoleTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]v1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleTemplate. +func (in *RoleTemplate) DeepCopy() *RoleTemplate { + if in == nil { + return nil + } + out := new(RoleTemplate) in.DeepCopyInto(out) return out } @@ -391,6 +535,24 @@ func (in *UserStatus) DeepCopy() *UserStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserTemplate) DeepCopyInto(out *UserTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserTemplate. +func (in *UserTemplate) DeepCopy() *UserTemplate { + if in == nil { + return nil + } + out := new(UserTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkspaceRole) DeepCopyInto(out *WorkspaceRole) { *out = *in diff --git a/pkg/apis/tenant/v1alpha2/workspacetemplate_types.go b/pkg/apis/tenant/v1alpha2/workspacetemplate_types.go index ab95e576d..690befcdd 100644 --- a/pkg/apis/tenant/v1alpha2/workspacetemplate_types.go +++ b/pkg/apis/tenant/v1alpha2/workspacetemplate_types.go @@ -18,6 +18,7 @@ package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" ) @@ -25,6 +26,22 @@ const ( ResourceKindWorkspaceTemplate = "WorkspaceTemplate" ResourceSingularWorkspaceTemplate = "workspacetemplate" ResourcePluralWorkspaceTemplate = "workspacetemplates" + ResourcesPluralFedWorkspace = "federatedworkspaces" + ResourcesSingularFedWorkspace = "federatedworkspace" + FedWorkspaceKind = "FederatedWorkspace" + fedResourceGroup = "types.kubefed.io" + fedResourceVersion = "v1beta1" +) + +var ( + FedWorkspaceResource = metav1.APIResource{ + Name: ResourcesPluralFedWorkspace, + SingularName: ResourcesSingularFedWorkspace, + Namespaced: false, + Group: fedResourceGroup, + Version: fedResourceVersion, + Kind: FedWorkspaceKind, + } ) // +genclient @@ -44,7 +61,8 @@ type WorkspaceTemplateSpec struct { v1alpha1.WorkspaceSpec `json:",inline"` // authorized clusters // +optional - Clusters []string `json:"clusters,omitempty"` + Clusters []string `json:"clusters,omitempty"` + 
Overrides []Override `json:"overrides,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -60,3 +78,44 @@ type WorkspaceTemplateList struct { func init() { SchemeBuilder.Register(&WorkspaceTemplate{}, &WorkspaceTemplateList{}) } + +type FederatedWorkspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FederatedWorkspaceSpec `json:"spec"` +} + +type FederatedWorkspaceSpec struct { + Template Template `json:"template"` + Placement Placement `json:"placement"` + Overrides []Override `json:"overrides,omitempty"` +} + +type Template struct { + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec v1alpha1.WorkspaceSpec `json:"spec"` +} + +type Placement struct { + Clusters []Cluster `json:"clusters,omitempty"` + ClusterSelector ClusterSelector `json:"clusterSelector,omitempty"` +} + +type ClusterSelector struct { + MatchLabels map[string]string `json:"matchLabels,omitempty"` +} + +type Cluster struct { + Name string `json:"name"` +} + +type Override struct { + ClusterName string `json:"clusterName"` + ClusterOverrides []ClusterOverride `json:"clusterOverrides"` +} + +type ClusterOverride struct { + Path string `json:"path"` + Op string `json:"op,omitempty"` + Value runtime.RawExtension `json:"value"` +} diff --git a/pkg/apis/tenant/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/tenant/v1alpha2/zz_generated.deepcopy.go index cc1c04561..8c50d5992 100644 --- a/pkg/apis/tenant/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/tenant/v1alpha2/zz_generated.deepcopy.go @@ -21,9 +21,164 @@ limitations under the License. package v1alpha2 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOverride) DeepCopyInto(out *ClusterOverride) { + *out = *in + in.Value.DeepCopyInto(&out.Value) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOverride. +func (in *ClusterOverride) DeepCopy() *ClusterOverride { + if in == nil { + return nil + } + out := new(ClusterOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSelector) DeepCopyInto(out *ClusterSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelector. +func (in *ClusterSelector) DeepCopy() *ClusterSelector { + if in == nil { + return nil + } + out := new(ClusterSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedWorkspace) DeepCopyInto(out *FederatedWorkspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedWorkspace. 
+func (in *FederatedWorkspace) DeepCopy() *FederatedWorkspace { + if in == nil { + return nil + } + out := new(FederatedWorkspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedWorkspaceSpec) DeepCopyInto(out *FederatedWorkspaceSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + in.Placement.DeepCopyInto(&out.Placement) + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]Override, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedWorkspaceSpec. +func (in *FederatedWorkspaceSpec) DeepCopy() *FederatedWorkspaceSpec { + if in == nil { + return nil + } + out := new(FederatedWorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Override) DeepCopyInto(out *Override) { + *out = *in + if in.ClusterOverrides != nil { + in, out := &in.ClusterOverrides, &out.ClusterOverrides + *out = make([]ClusterOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Override. +func (in *Override) DeepCopy() *Override { + if in == nil { + return nil + } + out := new(Override) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Placement) DeepCopyInto(out *Placement) { + *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]Cluster, len(*in)) + copy(*out, *in) + } + in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. +func (in *Placement) DeepCopy() *Placement { + if in == nil { + return nil + } + out := new(Placement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. +func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkspaceTemplate) DeepCopyInto(out *WorkspaceTemplate) { *out = *in @@ -91,6 +246,13 @@ func (in *WorkspaceTemplateSpec) DeepCopyInto(out *WorkspaceTemplateSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]Override, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceTemplateSpec. 
diff --git a/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_iam_client.go b/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_iam_client.go index 07bbfce08..31f091267 100644 --- a/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_iam_client.go +++ b/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_iam_client.go @@ -36,6 +36,10 @@ func (c *FakeIamV1alpha2) GlobalRoleBindings() v1alpha2.GlobalRoleBindingInterfa return &FakeGlobalRoleBindings{c} } +func (c *FakeIamV1alpha2) RoleBases() v1alpha2.RoleBaseInterface { + return &FakeRoleBases{c} +} + func (c *FakeIamV1alpha2) Users() v1alpha2.UserInterface { return &FakeUsers{c} } diff --git a/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_rolebase.go b/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_rolebase.go new file mode 100644 index 000000000..275cdc578 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake/fake_rolebase.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" +) + +// FakeRoleBases implements RoleBaseInterface +type FakeRoleBases struct { + Fake *FakeIamV1alpha2 +} + +var rolebasesResource = schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "rolebases"} + +var rolebasesKind = schema.GroupVersionKind{Group: "iam.kubesphere.io", Version: "v1alpha2", Kind: "RoleBase"} + +// Get takes name of the roleBase, and returns the corresponding roleBase object, and an error if there is any. +func (c *FakeRoleBases) Get(name string, options v1.GetOptions) (result *v1alpha2.RoleBase, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(rolebasesResource, name), &v1alpha2.RoleBase{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.RoleBase), err +} + +// List takes label and field selectors, and returns the list of RoleBases that match those selectors. +func (c *FakeRoleBases) List(opts v1.ListOptions) (result *v1alpha2.RoleBaseList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(rolebasesResource, rolebasesKind, opts), &v1alpha2.RoleBaseList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.RoleBaseList{ListMeta: obj.(*v1alpha2.RoleBaseList).ListMeta} + for _, item := range obj.(*v1alpha2.RoleBaseList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBases. 
+func (c *FakeRoleBases) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(rolebasesResource, opts)) +} + +// Create takes the representation of a roleBase and creates it. Returns the server's representation of the roleBase, and an error, if there is any. +func (c *FakeRoleBases) Create(roleBase *v1alpha2.RoleBase) (result *v1alpha2.RoleBase, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(rolebasesResource, roleBase), &v1alpha2.RoleBase{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.RoleBase), err +} + +// Update takes the representation of a roleBase and updates it. Returns the server's representation of the roleBase, and an error, if there is any. +func (c *FakeRoleBases) Update(roleBase *v1alpha2.RoleBase) (result *v1alpha2.RoleBase, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(rolebasesResource, roleBase), &v1alpha2.RoleBase{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.RoleBase), err +} + +// Delete takes name of the roleBase and deletes it. Returns an error if one occurs. +func (c *FakeRoleBases) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(rolebasesResource, name), &v1alpha2.RoleBase{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoleBases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(rolebasesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.RoleBaseList{}) + return err +} + +// Patch applies the patch and returns the patched roleBase. +func (c *FakeRoleBases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.RoleBase, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(rolebasesResource, name, pt, data, subresources...), &v1alpha2.RoleBase{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.RoleBase), err +} diff --git a/pkg/client/clientset/versioned/typed/iam/v1alpha2/generated_expansion.go b/pkg/client/clientset/versioned/typed/iam/v1alpha2/generated_expansion.go index a181b36b0..02e63ea85 100644 --- a/pkg/client/clientset/versioned/typed/iam/v1alpha2/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/iam/v1alpha2/generated_expansion.go @@ -22,6 +22,8 @@ type GlobalRoleExpansion interface{} type GlobalRoleBindingExpansion interface{} +type RoleBaseExpansion interface{} + type UserExpansion interface{} type WorkspaceRoleExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/iam/v1alpha2/iam_client.go b/pkg/client/clientset/versioned/typed/iam/v1alpha2/iam_client.go index d4cd2003b..089ed419d 100644 --- a/pkg/client/clientset/versioned/typed/iam/v1alpha2/iam_client.go +++ b/pkg/client/clientset/versioned/typed/iam/v1alpha2/iam_client.go @@ -28,6 +28,7 @@ type IamV1alpha2Interface interface { RESTClient() rest.Interface GlobalRolesGetter GlobalRoleBindingsGetter + RoleBasesGetter UsersGetter WorkspaceRolesGetter WorkspaceRoleBindingsGetter @@ -46,6 +47,10 @@ func (c *IamV1alpha2Client) GlobalRoleBindings() GlobalRoleBindingInterface { return newGlobalRoleBindings(c) } +func (c *IamV1alpha2Client) RoleBases() RoleBaseInterface { + return newRoleBases(c) +} + func (c *IamV1alpha2Client) Users() UserInterface { return newUsers(c) } diff --git a/pkg/client/clientset/versioned/typed/iam/v1alpha2/rolebase.go b/pkg/client/clientset/versioned/typed/iam/v1alpha2/rolebase.go new file mode 100644 index 000000000..cdf5a0c0e --- /dev/null +++ b/pkg/client/clientset/versioned/typed/iam/v1alpha2/rolebase.go @@ -0,0 +1,164 @@ +/* +Copyright 2020 The KubeSphere Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +// RoleBasesGetter has a method to return a RoleBaseInterface. +// A group's client should implement this interface. +type RoleBasesGetter interface { + RoleBases() RoleBaseInterface +} + +// RoleBaseInterface has methods to work with RoleBase resources. 
+type RoleBaseInterface interface { + Create(*v1alpha2.RoleBase) (*v1alpha2.RoleBase, error) + Update(*v1alpha2.RoleBase) (*v1alpha2.RoleBase, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.RoleBase, error) + List(opts v1.ListOptions) (*v1alpha2.RoleBaseList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.RoleBase, err error) + RoleBaseExpansion +} + +// roleBases implements RoleBaseInterface +type roleBases struct { + client rest.Interface +} + +// newRoleBases returns a RoleBases +func newRoleBases(c *IamV1alpha2Client) *roleBases { + return &roleBases{ + client: c.RESTClient(), + } +} + +// Get takes name of the roleBase, and returns the corresponding roleBase object, and an error if there is any. +func (c *roleBases) Get(name string, options v1.GetOptions) (result *v1alpha2.RoleBase, err error) { + result = &v1alpha2.RoleBase{} + err = c.client.Get(). + Resource("rolebases"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBases that match those selectors. +func (c *roleBases) List(opts v1.ListOptions) (result *v1alpha2.RoleBaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.RoleBaseList{} + err = c.client.Get(). + Resource("rolebases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBases. 
+func (c *roleBases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("rolebases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a roleBase and creates it. Returns the server's representation of the roleBase, and an error, if there is any. +func (c *roleBases) Create(roleBase *v1alpha2.RoleBase) (result *v1alpha2.RoleBase, err error) { + result = &v1alpha2.RoleBase{} + err = c.client.Post(). + Resource("rolebases"). + Body(roleBase). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBase and updates it. Returns the server's representation of the roleBase, and an error, if there is any. +func (c *roleBases) Update(roleBase *v1alpha2.RoleBase) (result *v1alpha2.RoleBase, err error) { + result = &v1alpha2.RoleBase{} + err = c.client.Put(). + Resource("rolebases"). + Name(roleBase.Name). + Body(roleBase). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBase and deletes it. Returns an error if one occurs. +func (c *roleBases) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("rolebases"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("rolebases"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched roleBase. 
+func (c *roleBases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.RoleBase, err error) { + result = &v1alpha2.RoleBase{} + err = c.client.Patch(pt). + Resource("rolebases"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/doc.go index f2efa4141..769278743 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/doc.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/doc.go index 329c98fb5..7e36dbca8 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/doc.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storage_client.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storage_client.go index 058aedec3..31aac53d8 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storage_client.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageclasscapability.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageclasscapability.go index aafe1134d..ab65a5b77 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageclasscapability.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/fake/fake_storageclasscapability.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/generated_expansion.go index 93a7eb394..740858649 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/storage_client.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/storage_client.go index d493de22f..930561fc8 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/storage_client.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/storage_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/clientset/versioned/typed/storage/v1alpha1/storageclasscapability.go b/pkg/client/clientset/versioned/typed/storage/v1alpha1/storageclasscapability.go index 9f24d83f2..88ee1ca83 100644 --- a/pkg/client/clientset/versioned/typed/storage/v1alpha1/storageclasscapability.go +++ b/pkg/client/clientset/versioned/typed/storage/v1alpha1/storageclasscapability.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 71d7a2028..1931b8cc0 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -85,6 +85,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Iam().V1alpha2().GlobalRoles().Informer()}, nil case v1alpha2.SchemeGroupVersion.WithResource("globalrolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Iam().V1alpha2().GlobalRoleBindings().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("rolebases"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Iam().V1alpha2().RoleBases().Informer()}, nil case v1alpha2.SchemeGroupVersion.WithResource("users"): return &genericInformer{resource: resource.GroupResource(), informer: f.Iam().V1alpha2().Users().Informer()}, nil case v1alpha2.SchemeGroupVersion.WithResource("workspaceroles"): diff --git a/pkg/client/informers/externalversions/iam/v1alpha2/interface.go b/pkg/client/informers/externalversions/iam/v1alpha2/interface.go index ea29ff8a5..05037e41d 100644 --- a/pkg/client/informers/externalversions/iam/v1alpha2/interface.go +++ 
b/pkg/client/informers/externalversions/iam/v1alpha2/interface.go @@ -28,6 +28,8 @@ type Interface interface { GlobalRoles() GlobalRoleInformer // GlobalRoleBindings returns a GlobalRoleBindingInformer. GlobalRoleBindings() GlobalRoleBindingInformer + // RoleBases returns a RoleBaseInformer. + RoleBases() RoleBaseInformer // Users returns a UserInformer. Users() UserInformer // WorkspaceRoles returns a WorkspaceRoleInformer. @@ -57,6 +59,11 @@ func (v *version) GlobalRoleBindings() GlobalRoleBindingInformer { return &globalRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// RoleBases returns a RoleBaseInformer. +func (v *version) RoleBases() RoleBaseInformer { + return &roleBaseInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Users returns a UserInformer. func (v *version) Users() UserInformer { return &userInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/iam/v1alpha2/rolebase.go b/pkg/client/informers/externalversions/iam/v1alpha2/rolebase.go new file mode 100644 index 000000000..5e9689a2f --- /dev/null +++ b/pkg/client/informers/externalversions/iam/v1alpha2/rolebase.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" +) + +// RoleBaseInformer provides access to a shared informer and lister for +// RoleBases. +type RoleBaseInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.RoleBaseLister +} + +type roleBaseInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewRoleBaseInformer constructs a new informer for RoleBase type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBaseInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBaseInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBaseInformer constructs a new informer for RoleBase type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleBaseInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.IamV1alpha2().RoleBases().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.IamV1alpha2().RoleBases().Watch(options) + }, + }, + &iamv1alpha2.RoleBase{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBaseInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBaseInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBaseInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&iamv1alpha2.RoleBase{}, f.defaultInformer) +} + +func (f *roleBaseInformer) Lister() v1alpha2.RoleBaseLister { + return v1alpha2.NewRoleBaseLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/storage/interface.go b/pkg/client/informers/externalversions/storage/interface.go index 20b0918e4..416695e5f 100644 --- a/pkg/client/informers/externalversions/storage/interface.go +++ b/pkg/client/informers/externalversions/storage/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/client/informers/externalversions/storage/v1alpha1/interface.go b/pkg/client/informers/externalversions/storage/v1alpha1/interface.go index 10f7ab55b..cf0a6e904 100644 --- a/pkg/client/informers/externalversions/storage/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/storage/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/informers/externalversions/storage/v1alpha1/storageclasscapability.go b/pkg/client/informers/externalversions/storage/v1alpha1/storageclasscapability.go index c116fcb09..aaaffce7a 100644 --- a/pkg/client/informers/externalversions/storage/v1alpha1/storageclasscapability.go +++ b/pkg/client/informers/externalversions/storage/v1alpha1/storageclasscapability.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/iam/v1alpha2/expansion_generated.go b/pkg/client/listers/iam/v1alpha2/expansion_generated.go index 7b7e05202..b2b4fc679 100644 --- a/pkg/client/listers/iam/v1alpha2/expansion_generated.go +++ b/pkg/client/listers/iam/v1alpha2/expansion_generated.go @@ -26,6 +26,10 @@ type GlobalRoleListerExpansion interface{} // GlobalRoleBindingLister. type GlobalRoleBindingListerExpansion interface{} +// RoleBaseListerExpansion allows custom methods to be added to +// RoleBaseLister. +type RoleBaseListerExpansion interface{} + // UserListerExpansion allows custom methods to be added to // UserLister. 
type UserListerExpansion interface{} diff --git a/pkg/client/listers/iam/v1alpha2/rolebase.go b/pkg/client/listers/iam/v1alpha2/rolebase.go new file mode 100644 index 000000000..433d5013f --- /dev/null +++ b/pkg/client/listers/iam/v1alpha2/rolebase.go @@ -0,0 +1,65 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" +) + +// RoleBaseLister helps list RoleBases. +type RoleBaseLister interface { + // List lists all RoleBases in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.RoleBase, err error) + // Get retrieves the RoleBase from the index for a given name. + Get(name string) (*v1alpha2.RoleBase, error) + RoleBaseListerExpansion +} + +// roleBaseLister implements the RoleBaseLister interface. +type roleBaseLister struct { + indexer cache.Indexer +} + +// NewRoleBaseLister returns a new RoleBaseLister. +func NewRoleBaseLister(indexer cache.Indexer) RoleBaseLister { + return &roleBaseLister{indexer: indexer} +} + +// List lists all RoleBases in the indexer. 
+func (s *roleBaseLister) List(selector labels.Selector) (ret []*v1alpha2.RoleBase, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.RoleBase)) + }) + return ret, err +} + +// Get retrieves the RoleBase from the index for a given name. +func (s *roleBaseLister) Get(name string) (*v1alpha2.RoleBase, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("rolebase"), name) + } + return obj.(*v1alpha2.RoleBase), nil +} diff --git a/pkg/client/listers/storage/v1alpha1/expansion_generated.go b/pkg/client/listers/storage/v1alpha1/expansion_generated.go index 583d96a8c..2789f5e52 100644 --- a/pkg/client/listers/storage/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/storage/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/listers/storage/v1alpha1/storageclasscapability.go b/pkg/client/listers/storage/v1alpha1/storageclasscapability.go index 98b1abd01..06ddae78d 100644 --- a/pkg/client/listers/storage/v1alpha1/storageclasscapability.go +++ b/pkg/client/listers/storage/v1alpha1/storageclasscapability.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The KubeSphere authors. +Copyright 2020 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 4530686a8..631613c76 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -49,6 +49,7 @@ const ( DevopsOwner = "owner" DevopsReporter = "reporter" DevOpsProjectLabelKey = "kubesphere.io/devopsproject" + KubefedManagedLabel = "kubefed.io/managed" UserNameHeader = "X-Token-Username" diff --git a/pkg/controller/certificatesigningrequest/certificatesigningrequest_controller.go b/pkg/controller/certificatesigningrequest/certificatesigningrequest_controller.go index ae90e87b8..533ecf685 100644 --- a/pkg/controller/certificatesigningrequest/certificatesigningrequest_controller.go +++ b/pkg/controller/certificatesigningrequest/certificatesigningrequest_controller.go @@ -24,14 +24,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" - coreinformers "k8s.io/client-go/informers/core/v1" + corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" certificateslisters "k8s.io/client-go/listers/certificates/v1beta1" - corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" @@ -55,11 +53,7 @@ type Controller struct { csrInformer certificatesinformers.CertificateSigningRequestInformer csrLister certificateslisters.CertificateSigningRequestLister csrSynced cache.InformerSynced - - cmInformer coreinformers.ConfigMapInformer - cmLister corelisters.ConfigMapLister - cmSynced cache.InformerSynced - + cmSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. 
This // means we can ensure we only process a fixed amount of resources at a @@ -72,7 +66,8 @@ type Controller struct { kubeconfigOperator kubeconfig.Interface } -func NewController(k8sClient kubernetes.Interface, informerFactory informers.SharedInformerFactory, config *rest.Config) *Controller { +func NewController(k8sClient kubernetes.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer, + configMapInformer corev1informers.ConfigMapInformer, config *rest.Config) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. @@ -82,17 +77,13 @@ func NewController(k8sClient kubernetes.Interface, informerFactory informers.Sha eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) - csrInformer := informerFactory.Certificates().V1beta1().CertificateSigningRequests() - cmInformer := informerFactory.Core().V1().ConfigMaps() ctl := &Controller{ k8sclient: k8sClient, csrInformer: csrInformer, csrLister: csrInformer.Lister(), csrSynced: csrInformer.Informer().HasSynced, - cmInformer: cmInformer, - cmLister: cmInformer.Lister(), - cmSynced: cmInformer.Informer().HasSynced, - kubeconfigOperator: kubeconfig.NewOperator(k8sClient, config, ""), + cmSynced: configMapInformer.Informer().HasSynced, + kubeconfigOperator: kubeconfig.NewOperator(k8sClient, configMapInformer, config), workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertificateSigningRequest"), recorder: recorder, } @@ -112,7 +103,7 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer c.workqueue.ShutDown() // Start the csrInformer factories to begin populating the csrInformer caches - klog.Info("Starting User controller") + 
klog.Info("Starting CSR controller") // Wait for the caches to be csrSynced before starting workers klog.Info("Waiting for csrInformer caches to sync") diff --git a/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go index af0dc592d..140bc5d51 100644 --- a/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go +++ b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go @@ -100,10 +100,8 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - //init client - // Start the informer factories to begin populating the informer caches - klog.Info("Starting User controller") + klog.Info("Starting ClusterRoleBinding controller") // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") diff --git a/pkg/controller/globalrole/globalrole_controller.go b/pkg/controller/globalrole/globalrole_controller.go new file mode 100644 index 000000000..29fb94df4 --- /dev/null +++ b/pkg/controller/globalrole/globalrole_controller.go @@ -0,0 +1,354 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package globalrole + +import ( + "encoding/json" + "fmt" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/constants" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "time" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is synced successfully + messageResourceSynced = "GlobalRole synced successfully" + controllerName = "globalrole-controller" +) + +type Controller struct { + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + globalRoleInformer iamv1alpha2informers.GlobalRoleInformer + globalRoleLister iamv1alpha2listers.GlobalRoleLister + globalRoleSynced cache.InformerSynced + fedGlobalRoleCache cache.Store + fedGlobalRoleCacheController cache.Controller + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. 
+ workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, globalRoleInformer iamv1alpha2informers.GlobalRoleInformer, + fedGlobalRoleCache cache.Store, fedGlobalRoleCacheController cache.Controller) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. + + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + globalRoleInformer: globalRoleInformer, + globalRoleLister: globalRoleInformer.Lister(), + globalRoleSynced: globalRoleInformer.Informer().HasSynced, + fedGlobalRoleCache: fedGlobalRoleCache, + fedGlobalRoleCacheController: fedGlobalRoleCacheController, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRole"), + recorder: recorder, + } + klog.Info("Setting up event handlers") + globalRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueClusterRole, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueClusterRole(new) + }, + DeleteFunc: ctl.enqueueClusterRole, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting GlobalRole controller") + + // Wait for the caches to be synced before starting workers + 
klog.Info("Waiting for informer caches to sync") + + if ok := cache.WaitForCacheSync(stopCh, c.globalRoleSynced, c.fedGlobalRoleCacheController.HasSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) enqueueClusterRole(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. 
+ c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + globalRole, err := c.globalRoleLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. 
+ if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("globalrole '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + + if err = c.multiClusterSync(globalRole); err != nil { + klog.Error(err) + return err + } + + c.recorder.Event(globalRole, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +func (c *Controller) multiClusterSync(globalRole *iamv1alpha2.GlobalRole) error { + + if err := c.ensureNotControlledByKubefed(globalRole); err != nil { + klog.Error(err) + return err + } + + obj, exist, err := c.fedGlobalRoleCache.GetByKey(globalRole.Name) + if !exist { + return c.createFederatedGlobalRole(globalRole) + } + if err != nil { + klog.Error(err) + return err + } + + var federatedGlobalRole iamv1alpha2.FederatedRole + + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRole); err != nil { + klog.Error(err) + return err + } + + if !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Rules, globalRole.Rules) || + !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Labels, globalRole.Labels) || + !reflect.DeepEqual(federatedGlobalRole.Spec.Template.Annotations, globalRole.Annotations) { + + federatedGlobalRole.Spec.Template.Rules = globalRole.Rules + federatedGlobalRole.Spec.Template.Annotations = globalRole.Annotations + federatedGlobalRole.Spec.Template.Labels = globalRole.Labels + + return c.updateFederatedGlobalRole(&federatedGlobalRole) + } + + return nil +} + +func (c *Controller) createFederatedGlobalRole(globalRole *iamv1alpha2.GlobalRole) error { + federatedGlobalRole := &iamv1alpha2.FederatedRole{ + TypeMeta: metav1.TypeMeta{ + Kind: iamv1alpha2.FedGlobalRoleKind, + APIVersion: iamv1alpha2.FedGlobalRoleResource.Group + "/" + iamv1alpha2.FedGlobalRoleResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: 
globalRole.Name, + }, + Spec: iamv1alpha2.FederatedRoleSpec{ + Template: iamv1alpha2.RoleTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Labels: globalRole.Labels, + Annotations: globalRole.Annotations, + }, + Rules: globalRole.Rules, + }, + Placement: iamv1alpha2.Placement{ + ClusterSelector: iamv1alpha2.ClusterSelector{}, + }, + }, + } + + err := controllerutil.SetControllerReference(globalRole, federatedGlobalRole, scheme.Scheme) + if err != nil { + return err + } + + data, err := json.Marshal(federatedGlobalRole) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group, + iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name)). + Body(data). + Do().Error() + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedGlobalRole(federatedGlobalRole *iamv1alpha2.FederatedRole) error { + + data, err := json.Marshal(federatedGlobalRole) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Put(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleResource.Group, + iamv1alpha2.FedGlobalRoleResource.Version, iamv1alpha2.FedGlobalRoleResource.Name, + federatedGlobalRole.Name)). + Body(data). 
+ Do().Error() + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) ensureNotControlledByKubefed(globalRole *iamv1alpha2.GlobalRole) error { + if globalRole.Labels[constants.KubefedManagedLabel] != "false" { + if globalRole.Labels == nil { + globalRole.Labels = make(map[string]string, 0) + } + globalRole = globalRole.DeepCopy() + globalRole.Labels[constants.KubefedManagedLabel] = "false" + _, err := c.ksClient.IamV1alpha2().GlobalRoles().Update(globalRole) + if err != nil { + klog.Error(err) + } + } + return nil +} diff --git a/pkg/controller/globalrolebinding/globalrolebinding_controller.go b/pkg/controller/globalrolebinding/globalrolebinding_controller.go index 3d4726805..f245e44e5 100644 --- a/pkg/controller/globalrolebinding/globalrolebinding_controller.go +++ b/pkg/controller/globalrolebinding/globalrolebinding_controller.go @@ -23,6 +23,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -33,8 +35,11 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/constants" + "reflect" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "time" ) @@ -43,18 +48,18 @@ const ( // SuccessSynced is used as part of the Event 'reason' when a Foo is synced successSynced = "Synced" // is synced successfully - messageResourceSynced = "GlobalRoleBinding synced successfully" - 
controllerName = "globalrolebinding-controller" - federatedClusterRoleBindingKind = "FederatedClusterRoleBinding" - federatedResourceVersion = "types.kubefed.io/v1beta1" - federatedResourceAPIPath = "/apis/types.kubefed.io/v1beta1/federatedclusterrolebindings" + messageResourceSynced = "GlobalRoleBinding synced successfully" + controllerName = "globalrolebinding-controller" ) type Controller struct { - k8sClient kubernetes.Interface - informer iamv1alpha2informers.GlobalRoleBindingInformer - lister iamv1alpha2listers.GlobalRoleBindingLister - synced cache.InformerSynced + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer + globalRoleBindingLister iamv1alpha2listers.GlobalRoleBindingLister + globalRoleBindingSynced cache.InformerSynced + fedGlobalRoleBindingCache cache.Store + fedGlobalRoleBindingCacheController cache.Controller // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a @@ -67,7 +72,8 @@ type Controller struct { multiClusterEnabled bool } -func NewController(k8sClient kubernetes.Interface, globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer, multiClusterEnabled bool) *Controller { +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, globalRoleBindingInformer iamv1alpha2informers.GlobalRoleBindingInformer, + fedGlobalRoleBindingCache cache.Store, fedGlobalRoleBindingCacheController cache.Controller, multiClusterEnabled bool) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
@@ -78,13 +84,16 @@ func NewController(k8sClient kubernetes.Interface, globalRoleBindingInformer iam eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) ctl := &Controller{ - k8sClient: k8sClient, - informer: globalRoleBindingInformer, - lister: globalRoleBindingInformer.Lister(), - synced: globalRoleBindingInformer.Informer().HasSynced, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleBinding"), - recorder: recorder, - multiClusterEnabled: multiClusterEnabled, + k8sClient: k8sClient, + ksClient: ksClient, + globalRoleBindingInformer: globalRoleBindingInformer, + globalRoleBindingLister: globalRoleBindingInformer.Lister(), + globalRoleBindingSynced: globalRoleBindingInformer.Informer().HasSynced, + fedGlobalRoleBindingCache: fedGlobalRoleBindingCache, + fedGlobalRoleBindingCacheController: fedGlobalRoleBindingCacheController, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "GlobalRoleBinding"), + recorder: recorder, + multiClusterEnabled: multiClusterEnabled, } klog.Info("Setting up event handlers") globalRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -101,14 +110,19 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - //init client - // Start the informer factories to begin populating the informer caches - klog.Info("Starting User controller") + klog.Info("Starting GlobalRoleBinding controller") // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, c.synced); !ok { + + synced := make([]cache.InformerSynced, 0) + synced = append(synced, c.globalRoleBindingSynced) + if c.multiClusterEnabled { + 
synced = append(synced, c.fedGlobalRoleBindingCacheController.HasSynced) + } + + if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { return fmt.Errorf("failed to wait for caches to sync") } @@ -197,12 +211,12 @@ func (c *Controller) processNextWorkItem() bool { // with the current status of the resource. func (c *Controller) reconcile(key string) error { - globalRoleBinding, err := c.lister.Get(key) + globalRoleBinding, err := c.globalRoleBindingLister.Get(key) if err != nil { // The user may no longer exist, in which case we stop // processing. if errors.IsNotFound(err) { - utilruntime.HandleError(fmt.Errorf("clusterrolebinding '%s' in work queue no longer exists", key)) + utilruntime.HandleError(fmt.Errorf("globalrolebinding '%s' in work queue no longer exists", key)) return nil } klog.Error(err) @@ -216,6 +230,13 @@ func (c *Controller) reconcile(key string) error { } } + if c.multiClusterEnabled { + if err = c.multiClusterSync(globalRoleBinding); err != nil { + klog.Error(err) + return err + } + } + c.recorder.Event(globalRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced) return nil } @@ -224,89 +245,181 @@ func (c *Controller) Start(stopCh <-chan struct{}) error { return c.Run(4, stopCh) } -func (c *Controller) relateToClusterAdmin(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error { +func (c *Controller) multiClusterSync(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error { - if c.multiClusterEnabled { - - federatedClusterRoleBinding := &iamv1alpha2.FederatedClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: federatedClusterRoleBindingKind, - APIVersion: federatedResourceVersion, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("fed-%s", globalRoleBinding.Name), - }, - Spec: iamv1alpha2.FederatedClusterRoleBindingSpec{ - Template: iamv1alpha2.Template{ - Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects), - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: 
iamv1alpha2.ResourceKindClusterRole, - Name: iamv1alpha2.ClusterAdmin, - }, - }, - Placement: iamv1alpha2.Placement{ - ClusterSelector: iamv1alpha2.ClusterSelector{}, - }, - }, - } - - err := controllerutil.SetControllerReference(globalRoleBinding, federatedClusterRoleBinding, scheme.Scheme) - - if err != nil { - return err - } - - data, err := json.Marshal(federatedClusterRoleBinding) - - if err != nil { - return err - } - - cli := c.k8sClient.(*kubernetes.Clientset) - - err = cli.RESTClient().Post(). - AbsPath(federatedResourceAPIPath). - Body(data). - Do().Error() - - if err != nil { - if errors.IsAlreadyExists(err) { - return nil - } - return err - } - } else { - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("fed-%s", globalRoleBinding.Name), - }, - Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects), - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: iamv1alpha2.ResourceKindClusterRole, - Name: iamv1alpha2.ClusterAdmin, - }, - } - - err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, scheme.Scheme) - - if err != nil { - return err - } - - _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) - - if err != nil { - if errors.IsAlreadyExists(err) { - return nil - } - return err - } + if err := c.ensureNotControlledByKubefed(globalRoleBinding); err != nil { + klog.Error(err) + return err } + obj, exist, err := c.fedGlobalRoleBindingCache.GetByKey(globalRoleBinding.Name) + if !exist { + return c.createFederatedGlobalRoleBinding(globalRoleBinding) + } + if err != nil { + klog.Error(err) + return err + } + + var federatedGlobalRoleBinding iamv1alpha2.FederatedRoleBinding + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedGlobalRoleBinding) + + if err != nil { + klog.Error(err) + return err + } + + if 
!reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Subjects, globalRoleBinding.Subjects) || + !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.RoleRef, globalRoleBinding.RoleRef) || + !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Labels, globalRoleBinding.Labels) || + !reflect.DeepEqual(federatedGlobalRoleBinding.Spec.Template.Annotations, globalRoleBinding.Annotations) { + + federatedGlobalRoleBinding.Spec.Template.Subjects = globalRoleBinding.Subjects + federatedGlobalRoleBinding.Spec.Template.RoleRef = globalRoleBinding.RoleRef + federatedGlobalRoleBinding.Spec.Template.Annotations = globalRoleBinding.Annotations + federatedGlobalRoleBinding.Spec.Template.Labels = globalRoleBinding.Labels + + return c.updateFederatedGlobalRoleBinding(&federatedGlobalRoleBinding) + } + + return nil +} + +func (c *Controller) relateToClusterAdmin(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error { + + username := findExpectUsername(globalRoleBinding) + + // unexpected + if username == "" { + return nil + } + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin), + }, + Subjects: ensureSubjectAPIVersionIsValid(globalRoleBinding.Subjects), + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: iamv1alpha2.ResourceKindClusterRole, + Name: iamv1alpha2.ClusterAdmin, + }, + } + + err := controllerutil.SetControllerReference(globalRoleBinding, clusterRoleBinding, scheme.Scheme) + if err != nil { + return err + } + + _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func findExpectUsername(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) string { + for _, subject := range globalRoleBinding.Subjects { + if subject.Kind == iamv1alpha2.ResourceKindUser { + return 
subject.Name + } + } + return "" +} + +func (c *Controller) createFederatedGlobalRoleBinding(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error { + federatedGlobalRoleBinding := &iamv1alpha2.FederatedRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: iamv1alpha2.FedGlobalRoleBindingKind, + APIVersion: iamv1alpha2.FedGlobalRoleBindingResource.Group + "/" + iamv1alpha2.FedGlobalRoleBindingResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: globalRoleBinding.Name, + }, + Spec: iamv1alpha2.FederatedRoleBindingSpec{ + Template: iamv1alpha2.RoleBindingTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Labels: globalRoleBinding.Labels, + Annotations: globalRoleBinding.Annotations, + }, + Subjects: globalRoleBinding.Subjects, + RoleRef: globalRoleBinding.RoleRef, + }, + Placement: iamv1alpha2.Placement{ + ClusterSelector: iamv1alpha2.ClusterSelector{}, + }, + }, + } + + err := controllerutil.SetControllerReference(globalRoleBinding, federatedGlobalRoleBinding, scheme.Scheme) + if err != nil { + return err + } + + data, err := json.Marshal(federatedGlobalRoleBinding) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group, + iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name)). + Body(data). + Do().Error() + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedGlobalRoleBinding(federatedGlobalRoleBinding *iamv1alpha2.FederatedRoleBinding) error { + + data, err := json.Marshal(federatedGlobalRoleBinding) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Put(). 
+ AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedGlobalRoleBindingResource.Group, + iamv1alpha2.FedGlobalRoleBindingResource.Version, iamv1alpha2.FedGlobalRoleBindingResource.Name, + federatedGlobalRoleBinding.Name)). + Body(data). + Do().Error() + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) ensureNotControlledByKubefed(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error { + if globalRoleBinding.Labels[constants.KubefedManagedLabel] != "false" { + if globalRoleBinding.Labels == nil { + globalRoleBinding.Labels = make(map[string]string, 0) + } + globalRoleBinding = globalRoleBinding.DeepCopy() + globalRoleBinding.Labels[constants.KubefedManagedLabel] = "false" + _, err := c.ksClient.IamV1alpha2().GlobalRoleBindings().Update(globalRoleBinding) + if err != nil { + klog.Error(err) + } + } return nil } diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index 37cb07334..36047a712 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -17,17 +17,23 @@ limitations under the License. 
package namespace import ( + "bytes" "context" + "fmt" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog" - "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/utils/sliceutil" + "reflect" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -37,11 +43,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -/** -* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller -* business logic. Delete these comments after modifying this file.* - */ - // Add creates a new Namespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
func Add(mgr manager.Manager) error { @@ -138,7 +139,15 @@ func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Res return reconcile.Result{}, nil } - if err = r.checkAndBindWorkspace(instance); err != nil { + if err = r.bindWorkspace(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.initRoles(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.initCreatorRoleBinding(instance); err != nil { return reconcile.Result{}, err } @@ -157,7 +166,7 @@ func (r *ReconcileNamespace) isControlledByWorkspace(namespace *corev1.Namespace return true, nil } -func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace) error { +func (r *ReconcileNamespace) bindWorkspace(namespace *corev1.Namespace) error { workspaceName := namespace.Labels[constants.WorkspaceLabelKey] @@ -165,7 +174,7 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace) return nil } - workspace := &v1alpha1.Workspace{} + workspace := &tenantv1alpha1.Workspace{} err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace) @@ -174,18 +183,20 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace) if errors.IsNotFound(err) { return nil } - klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err) + klog.Error(err) return err } - if !metav1.IsControlledBy(namespace, workspace) { + // federated namespace not controlled by workspace + if namespace.Labels[constants.KubefedManagedLabel] != "true" && !metav1.IsControlledBy(namespace, workspace) { + namespace.OwnerReferences = removeWorkspaceOwnerReferences(namespace.OwnerReferences) if err := controllerutil.SetControllerReference(workspace, namespace, r.scheme); err != nil { - klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err) + klog.Error(err) return err } err = r.Update(context.TODO(), namespace) if err 
 != nil {
-		klog.Errorf("bind workspace namespace: %s, workspace: %s, error: %s", namespace.Name, workspaceName, err)
+		klog.Error(err)
 			return err
 		}
 	}
@@ -193,6 +204,16 @@ func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace)
 	return nil
 }
 
+// removeWorkspaceOwnerReferences returns ownerReferences with every Workspace
+// owner reference removed. Filter in place instead of deleting while ranging:
+// a `range` loop reassigns its index each iteration, so the old `i--` was a
+// no-op and consecutive Workspace owner references were skipped.
+func removeWorkspaceOwnerReferences(ownerReferences []metav1.OwnerReference) []metav1.OwnerReference {
+	kept := ownerReferences[:0]
+	for _, owner := range ownerReferences {
+		if owner.Kind != tenantv1alpha1.ResourceKindWorkspace {
+			kept = append(kept, owner)
+		}
+	}
+	return kept
+}
+
 func (r *ReconcileNamespace) deleteRouter(namespace string) error {
 	routerName := constants.IngressControllerPrefix + namespace
@@ -230,5 +251,79 @@ func (r *ReconcileNamespace) deleteRouter(namespace string) error {
 	}
 	return nil
-
+}
+
+func (r *ReconcileNamespace) initRoles(namespace *corev1.Namespace) error {
+	var roleBases iamv1alpha2.RoleBaseList
+
+	err := r.List(context.Background(), &roleBases)
+	if err != nil {
+		klog.Error(err)
+		return err
+	}
+
+	for _, roleBase := range roleBases.Items {
+		var role rbacv1.Role
+
+		if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil {
+			var old rbacv1.Role
+			err := r.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, &old)
+			if err != nil {
+				// propagate unexpected errors instead of silently diffing
+				// against a zero-valued role below
+				if !errors.IsNotFound(err) {
+					klog.Error(err)
+					return err
+				}
+				role.Namespace = namespace.Name
+				if err = r.Client.Create(context.Background(), &role); err != nil {
+					klog.Error(err)
+					return err
+				}
+				continue
+			}
+
+			if !reflect.DeepEqual(role.Labels, old.Labels) ||
+				!reflect.DeepEqual(role.Annotations, old.Annotations) ||
+				!reflect.DeepEqual(role.Rules, old.Rules) {
+
+				old.Labels = role.Labels
+				old.Annotations = role.Annotations
+				old.Rules = role.Rules
+
+				// keep reconciling the remaining role bases instead of
+				// returning after the first drifted role
+				if err = r.Update(context.Background(), &old); err != nil {
+					klog.Error(err)
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (r *ReconcileNamespace) initCreatorRoleBinding(namespace *corev1.Namespace) error {
+	if
creator := namespace.Annotations[constants.CreatorAnnotationKey]; creator != "" { + creatorRoleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", creator, iamv1alpha2.NamespaceAdmin), + Namespace: namespace.Name, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: iamv1alpha2.ResourceKindRole, + Name: iamv1alpha2.NamespaceAdmin, + }, + Subjects: []rbacv1.Subject{ + { + Name: creator, + Kind: iamv1alpha2.ResourceKindUser, + APIGroup: rbacv1.GroupName, + }, + }, + } + err := r.Client.Create(context.Background(), creatorRoleBinding) + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + klog.Error(err) + return err + } + } + + return nil } diff --git a/pkg/controller/user/user_controller.go b/pkg/controller/user/user_controller.go index 6505fc78b..83c92a17c 100644 --- a/pkg/controller/user/user_controller.go +++ b/pkg/controller/user/user_controller.go @@ -17,12 +17,17 @@ limitations under the License. package user import ( + "encoding/json" "fmt" "golang.org/x/crypto/bcrypt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -36,7 +41,10 @@ import ( kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" userinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" userlister "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/models/kubeconfig" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "strconv" "time" ) @@ 
-50,12 +58,15 @@ const ( ) type Controller struct { - k8sClient kubernetes.Interface - ksClient kubesphere.Interface - kubeconfig kubeconfig.Interface - userInformer userinformer.UserInformer - userLister userlister.UserLister - userSynced cache.InformerSynced + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + kubeconfig kubeconfig.Interface + userInformer userinformer.UserInformer + userLister userlister.UserLister + userSynced cache.InformerSynced + cmSynced cache.InformerSynced + fedUserCache cache.Store + fedUserController cache.Controller // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a @@ -64,11 +75,13 @@ type Controller struct { workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. - recorder record.EventRecorder + recorder record.EventRecorder + multiClusterEnabled bool } func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, - config *rest.Config, userInformer userinformer.UserInformer) *Controller { + config *rest.Config, userInformer userinformer.UserInformer, fedUserCache cache.Store, fedUserController cache.Controller, + configMapInformer corev1informers.ConfigMapInformer, multiClusterEnabled bool) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
@@ -81,17 +94,21 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) var kubeconfigOperator kubeconfig.Interface if config != nil { - kubeconfigOperator = kubeconfig.NewOperator(k8sClient, config, "") + kubeconfigOperator = kubeconfig.NewOperator(k8sClient, configMapInformer, config) } ctl := &Controller{ - k8sClient: k8sClient, - ksClient: ksClient, - kubeconfig: kubeconfigOperator, - userInformer: userInformer, - userLister: userInformer.Lister(), - userSynced: userInformer.Informer().HasSynced, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"), - recorder: recorder, + k8sClient: k8sClient, + ksClient: ksClient, + kubeconfig: kubeconfigOperator, + userInformer: userInformer, + userLister: userInformer.Lister(), + userSynced: userInformer.Informer().HasSynced, + cmSynced: configMapInformer.Informer().HasSynced, + fedUserCache: fedUserCache, + fedUserController: fedUserController, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"), + recorder: recorder, + multiClusterEnabled: multiClusterEnabled, } klog.Info("Setting up event handlers") userInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -108,14 +125,19 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() - //init client - // Start the informer factories to begin populating the informer caches klog.Info("Starting User controller") // Wait for the caches to be synced before starting workers klog.Info("Waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, c.userSynced); !ok { + + synced := make([]cache.InformerSynced, 0) + synced = append(synced, c.userSynced, c.cmSynced) + if c.multiClusterEnabled { + synced = append(synced, 
c.fedUserController.HasSynced) + } + + if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { return fmt.Errorf("failed to wait for caches to sync") } @@ -217,16 +239,22 @@ func (c *Controller) reconcile(key string) error { return err } - user, err = c.encryptPassword(user.DeepCopy()) - - if err != nil { + if user, err = c.ensurePasswordIsEncrypted(user); err != nil { klog.Error(err) return err } if c.kubeconfig != nil { - err = c.kubeconfig.CreateKubeConfig(user) - if err != nil { + // ensure user kubeconfig configmap is created + if err = c.kubeconfig.CreateKubeConfig(user); err != nil { + klog.Error(err) + return err + } + } + + // synchronization through kubefed-controller when multi cluster is enabled + if c.multiClusterEnabled { + if err = c.multiClusterSync(user); err != nil { klog.Error(err) return err } @@ -240,9 +268,8 @@ func (c *Controller) Start(stopCh <-chan struct{}) error { return c.Run(4, stopCh) } -func (c *Controller) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { +func (c *Controller) ensurePasswordIsEncrypted(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { encrypted, err := strconv.ParseBool(user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation]) - // password is not encrypted if err != nil || !encrypted { password, err := encrypt(user.Spec.EncryptedPassword) @@ -250,21 +277,148 @@ func (c *Controller) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, klog.Error(err) return nil, err } + user = user.DeepCopy() user.Spec.EncryptedPassword = password if user.Annotations == nil { user.Annotations = make(map[string]string, 0) } + user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] = "true" user.Status.State = iamv1alpha2.UserActive - - updated, err := c.ksClient.IamV1alpha2().Users().Update(user) - - return updated, err + return c.ksClient.IamV1alpha2().Users().Update(user) } return user, nil } +func (c *Controller) ensureNotControlledByKubefed(user *iamv1alpha2.User) error { + if 
user.Labels[constants.KubefedManagedLabel] != "false" { + if user.Labels == nil { + user.Labels = make(map[string]string, 0) + } + user = user.DeepCopy() + user.Labels[constants.KubefedManagedLabel] = "false" + _, err := c.ksClient.IamV1alpha2().Users().Update(user) + if err != nil { + klog.Error(err) + } + } + return nil +} + +func (c *Controller) multiClusterSync(user *iamv1alpha2.User) error { + + if err := c.ensureNotControlledByKubefed(user); err != nil { + klog.Error(err) + return err + } + + obj, exist, err := c.fedUserCache.GetByKey(user.Name) + if !exist { + return c.createFederatedUser(user) + } + if err != nil { + klog.Error(err) + return err + } + + var federatedUser iamv1alpha2.FederatedUser + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedUser); err != nil { + klog.Error(err) + return err + } + + if !reflect.DeepEqual(federatedUser.Spec.Template.Spec, user.Spec) || + !reflect.DeepEqual(federatedUser.Spec.Template.Status, user.Status) || + !reflect.DeepEqual(federatedUser.Labels, user.Labels) || + !reflect.DeepEqual(federatedUser.Annotations, user.Annotations) { + + federatedUser.Labels = user.Labels + federatedUser.Spec.Template.Spec = user.Spec + federatedUser.Spec.Template.Status = user.Status + federatedUser.Spec.Template.Labels = user.Labels + federatedUser.Spec.Template.Annotations = user.Annotations + return c.updateFederatedUser(&federatedUser) + } + + return nil +} + +func (c *Controller) createFederatedUser(user *iamv1alpha2.User) error { + federatedUser := &iamv1alpha2.FederatedUser{ + TypeMeta: metav1.TypeMeta{ + Kind: iamv1alpha2.FedUserKind, + APIVersion: iamv1alpha2.FedUserResource.Group + "/" + iamv1alpha2.FedUserResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: user.Name, + }, + Spec: iamv1alpha2.FederatedUserSpec{ + Template: iamv1alpha2.UserTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Labels: user.Labels, + Annotations: user.Annotations, + }, + Spec: 
user.Spec, + Status: user.Status, + }, + Placement: iamv1alpha2.Placement{ + ClusterSelector: iamv1alpha2.ClusterSelector{}, + }, + }, + } + + // must bind user lifecycle + err := controllerutil.SetControllerReference(user, federatedUser, scheme.Scheme) + if err != nil { + return err + } + + data, err := json.Marshal(federatedUser) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedUserResource.Group, + iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name)). + Body(data). + Do().Error() + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) error { + data, err := json.Marshal(fedUser) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Put(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedUserResource.Group, + iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name, fedUser.Name)). + Body(data). 
+ Do().Error() + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + func encrypt(password string) (string, error) { // when user is already mapped to another identity, password is empty by default // unable to log in directly until password reset diff --git a/pkg/controller/user/user_controller_test.go b/pkg/controller/user/user_controller_test.go index a6de14d2f..01894afed 100644 --- a/pkg/controller/user/user_controller_test.go +++ b/pkg/controller/user/user_controller_test.go @@ -92,7 +92,7 @@ func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactor } } - c := NewController(f.k8sclient, f.ksclient, nil, ksinformers.Iam().V1alpha2().Users()) + c := NewController(f.k8sclient, f.ksclient, nil, ksinformers.Iam().V1alpha2().Users(), nil, nil, k8sinformers.Core().V1().ConfigMaps(), false) c.userSynced = alwaysReady c.recorder = &record.FakeRecorder{} diff --git a/pkg/controller/workspace/workspace_controller.go b/pkg/controller/workspace/workspace_controller.go index e806c00c5..4f4fb8df1 100644 --- a/pkg/controller/workspace/workspace_controller.go +++ b/pkg/controller/workspace/workspace_controller.go @@ -17,12 +17,19 @@ limitations under the License. 
package workspace
 
 import (
+	"bytes"
 	"context"
+	"fmt"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
+	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
 	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
 	"kubesphere.io/kubesphere/pkg/utils/sliceutil"
+	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -117,5 +124,48 @@ func (r *ReconcileWorkspace) Reconcile(request reconcile.Res
 		return reconcile.Result{}, nil
 	}
 
+	if err = r.initRoles(instance); err != nil {
+		klog.Error(err)
+		return reconcile.Result{}, err
+	}
+
 	return reconcile.Result{}, nil
 }
+
+// initRoles seeds the workspace-scoped roles declared by the RoleBase objects.
+func (r *ReconcileWorkspace) initRoles(workspace *tenantv1alpha1.Workspace) error {
+	var roleBases iamv1alpha2.RoleBaseList
+
+	err := r.List(context.Background(), &roleBases)
+	if err != nil {
+		klog.Error(err)
+		return err
+	}
+
+	for _, roleBase := range roleBases.Items {
+		var role iamv1alpha2.WorkspaceRole
+
+		if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil {
+			// derive the per-workspace name and labels BEFORE both Create and
+			// the comparison, so an existing role is not diffed against the
+			// template's unprefixed name and label-less metadata (which made
+			// every reconcile issue an Update that stripped the workspace label)
+			role.Name = fmt.Sprintf("%s-%s", workspace.Name, role.Name)
+			if role.Labels == nil {
+				// guard: assigning into a nil map panics
+				role.Labels = make(map[string]string)
+			}
+			role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
+
+			var old iamv1alpha2.WorkspaceRole
+			err := r.Client.Get(context.Background(), types.NamespacedName{Name: role.Name}, &old)
+			if err != nil {
+				// propagate unexpected errors instead of diffing against a
+				// zero-valued role below
+				if !errors.IsNotFound(err) {
+					klog.Error(err)
+					return err
+				}
+				if err = r.Client.Create(context.Background(), &role); err != nil {
+					klog.Error(err)
+					return err
+				}
+				// keep processing the remaining role bases instead of returning
+				continue
+			}
+
+			if !reflect.DeepEqual(role.Labels, old.Labels) ||
+				!reflect.DeepEqual(role.Annotations, old.Annotations) ||
+				!reflect.DeepEqual(role.Rules, old.Rules) {
+
+				old.Labels = role.Labels
+				old.Annotations = role.Annotations
+				old.Rules = role.Rules
+
+				if err = r.Update(context.Background(), &old); err != nil {
+					klog.Error(err)
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
diff
--git a/pkg/controller/workspacerole/workspacerole.go b/pkg/controller/workspacerole/workspacerole.go new file mode 100644 index 000000000..9f300ba02 --- /dev/null +++ b/pkg/controller/workspacerole/workspacerole.go @@ -0,0 +1,354 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workspacerole + +import ( + "encoding/json" + "fmt" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/constants" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "time" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is 
synced successfully + messageResourceSynced = "WorkspaceRole synced successfully" + controllerName = "workspacerole-controller" +) + +type Controller struct { + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer + workspaceRoleLister iamv1alpha2listers.WorkspaceRoleLister + workspaceRoleSynced cache.InformerSynced + fedWorkspaceRoleCache cache.Store + fedWorkspaceRoleCacheController cache.Controller + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer, + fedWorkspaceRoleCache cache.Store, fedWorkspaceRoleCacheController cache.Controller) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. 
+ + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + workspaceRoleInformer: workspaceRoleInformer, + workspaceRoleLister: workspaceRoleInformer.Lister(), + workspaceRoleSynced: workspaceRoleInformer.Informer().HasSynced, + fedWorkspaceRoleCache: fedWorkspaceRoleCache, + fedWorkspaceRoleCacheController: fedWorkspaceRoleCacheController, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRole"), + recorder: recorder, + } + klog.Info("Setting up event handlers") + workspaceRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueClusterRole, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueClusterRole(new) + }, + DeleteFunc: ctl.enqueueClusterRole, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting GlobalRole controller") + + // Wait for the caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + + if ok := cache.WaitForCacheSync(stopCh, c.workspaceRoleSynced, c.fedWorkspaceRoleCacheController.HasSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) 
enqueueClusterRole(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. 
+ c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + workspaceRole, err := c.workspaceRoleLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. + if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("workspacerole '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + + if err = c.multiClusterSync(workspaceRole); err != nil { + klog.Error(err) + return err + } + + c.recorder.Event(workspaceRole, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +func (c *Controller) multiClusterSync(workspaceRole *iamv1alpha2.WorkspaceRole) error { + + if err := c.ensureNotControlledByKubefed(workspaceRole); err != nil { + klog.Error(err) + return err + } + + obj, exist, err := c.fedWorkspaceRoleCache.GetByKey(workspaceRole.Name) + if !exist { + return c.createFederatedWorkspaceRole(workspaceRole) + } + if err != nil { + klog.Error(err) + return err + } + + var federatedWorkspaceRole iamv1alpha2.FederatedRole + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRole); err != nil { + klog.Error(err) + return err + } + + if !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Rules, workspaceRole.Rules) || + !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Labels, workspaceRole.Labels) || + !reflect.DeepEqual(federatedWorkspaceRole.Spec.Template.Annotations, workspaceRole.Annotations) { + + 
federatedWorkspaceRole.Spec.Template.Rules = workspaceRole.Rules + federatedWorkspaceRole.Spec.Template.Annotations = workspaceRole.Annotations + federatedWorkspaceRole.Spec.Template.Labels = workspaceRole.Labels + + return c.updateFederatedGlobalRole(&federatedWorkspaceRole) + } + + return nil +} + +func (c *Controller) createFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) error { + federatedWorkspaceRole := &iamv1alpha2.FederatedRole{ + TypeMeta: metav1.TypeMeta{ + Kind: iamv1alpha2.FedWorkspaceRoleKind, + APIVersion: iamv1alpha2.FedWorkspaceRoleResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: workspaceRole.Name, + }, + Spec: iamv1alpha2.FederatedRoleSpec{ + Template: iamv1alpha2.RoleTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Labels: workspaceRole.Labels, + Annotations: workspaceRole.Annotations, + }, + Rules: workspaceRole.Rules, + }, + Placement: iamv1alpha2.Placement{ + ClusterSelector: iamv1alpha2.ClusterSelector{}, + }, + }, + } + + err := controllerutil.SetControllerReference(workspaceRole, federatedWorkspaceRole, scheme.Scheme) + if err != nil { + return err + } + + data, err := json.Marshal(federatedWorkspaceRole) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group, + iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name)). + Body(data). + Do().Error() + + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedGlobalRole(federatedWorkspaceRole *iamv1alpha2.FederatedRole) error { + + data, err := json.Marshal(federatedWorkspaceRole) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Put(). 
+ AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleResource.Group, + iamv1alpha2.FedWorkspaceRoleResource.Version, iamv1alpha2.FedWorkspaceRoleResource.Name, + federatedWorkspaceRole.Name)). + Body(data). + Do().Error() + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) ensureNotControlledByKubefed(workspaceRole *iamv1alpha2.WorkspaceRole) error { + if workspaceRole.Labels[constants.KubefedManagedLabel] != "false" { + if workspaceRole.Labels == nil { + workspaceRole.Labels = make(map[string]string, 0) + } + workspaceRole = workspaceRole.DeepCopy() + workspaceRole.Labels[constants.KubefedManagedLabel] = "false" + _, err := c.ksClient.IamV1alpha2().WorkspaceRoles().Update(workspaceRole) + if err != nil { + klog.Error(err) + } + } + return nil +} diff --git a/pkg/controller/workspacerolebinding/workspacerolebinding_controller.go b/pkg/controller/workspacerolebinding/workspacerolebinding_controller.go new file mode 100644 index 000000000..78730684b --- /dev/null +++ b/pkg/controller/workspacerolebinding/workspacerolebinding_controller.go @@ -0,0 +1,432 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workspacerolebinding + +import ( + "encoding/json" + "fmt" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/constants" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "time" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is synced successfully + messageResourceSynced = "WorkspaceRoleBinding synced successfully" + controllerName = "workspacerolebinding-controller" +) + +type Controller struct { + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + workspaceRoleBindingInformer iamv1alpha2informers.WorkspaceRoleBindingInformer + workspaceRoleBindingLister iamv1alpha2listers.WorkspaceRoleBindingLister + workspaceRoleBindingSynced cache.InformerSynced + fedWorkspaceRoleBindingCache cache.Store + fedWorkspaceRoleBindingCacheController cache.Controller + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. 
This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. + recorder record.EventRecorder +} + +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceRoleBindingInformer iamv1alpha2informers.WorkspaceRoleBindingInformer, + fedWorkspaceRoleBindingCache cache.Store, fedWorkspaceRoleBindingCacheController cache.Controller) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. + + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + workspaceRoleBindingInformer: workspaceRoleBindingInformer, + workspaceRoleBindingLister: workspaceRoleBindingInformer.Lister(), + workspaceRoleBindingSynced: workspaceRoleBindingInformer.Informer().HasSynced, + fedWorkspaceRoleBindingCache: fedWorkspaceRoleBindingCache, + fedWorkspaceRoleBindingCacheController: fedWorkspaceRoleBindingCacheController, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceRoleBinding"), + recorder: recorder, + } + klog.Info("Setting up event handlers") + workspaceRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueClusterRoleBinding, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueClusterRoleBinding(new) + }, + 
DeleteFunc: ctl.enqueueClusterRoleBinding, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting WorkspaceRoleBinding controller") + + // Wait for the caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + + if ok := cache.WaitForCacheSync(stopCh, c.workspaceRoleBindingSynced, c.fedWorkspaceRoleBindingCacheController.HasSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) enqueueClusterRoleBinding(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. 
We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + workspaceRoleBinding, err := c.workspaceRoleBindingLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. 
+ if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("workspacerolebinding '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + + if err = c.multiClusterSync(workspaceRoleBinding); err != nil { + klog.Error(err) + return err + } + + c.recorder.Event(workspaceRoleBinding, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +func (c *Controller) multiClusterSync(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error { + + if err := c.ensureNotControlledByKubefed(workspaceRoleBinding); err != nil { + klog.Error(err) + return err + } + + obj, exist, err := c.fedWorkspaceRoleBindingCache.GetByKey(workspaceRoleBinding.Name) + + if !exist { + return c.createFederatedWorkspaceRoleBinding(workspaceRoleBinding) + } + + if err != nil { + klog.Error(err) + return err + } + + var federatedWorkspaceRoleBinding iamv1alpha2.FederatedRoleBinding + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedWorkspaceRoleBinding) + + if err != nil { + klog.Error(err) + return err + } + + if !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Subjects, workspaceRoleBinding.Subjects) || + !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.RoleRef, workspaceRoleBinding.RoleRef) || + !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Labels, workspaceRoleBinding.Labels) || + !reflect.DeepEqual(federatedWorkspaceRoleBinding.Spec.Template.Annotations, workspaceRoleBinding.Annotations) { + + federatedWorkspaceRoleBinding.Spec.Template.Subjects = workspaceRoleBinding.Subjects + federatedWorkspaceRoleBinding.Spec.Template.RoleRef = workspaceRoleBinding.RoleRef + federatedWorkspaceRoleBinding.Spec.Template.Annotations = workspaceRoleBinding.Annotations + federatedWorkspaceRoleBinding.Spec.Template.Labels = 
workspaceRoleBinding.Labels + + return c.updateFederatedWorkspaceRoleBinding(&federatedWorkspaceRoleBinding) + } + + return nil +} + +func (c *Controller) relateToClusterAdmin(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error { + + username := findExpectUsername(workspaceRoleBinding) + + // unexpected + if username == "" { + return nil + } + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", username, iamv1alpha2.ClusterAdmin), + }, + Subjects: ensureSubjectAPIVersionIsValid(workspaceRoleBinding.Subjects), + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: iamv1alpha2.ResourceKindClusterRole, + Name: iamv1alpha2.ClusterAdmin, + }, + } + + err := controllerutil.SetControllerReference(workspaceRoleBinding, clusterRoleBinding, scheme.Scheme) + + if err != nil { + return err + } + + _, err = c.k8sClient.RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) + + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +// binding only one user is expected +func findExpectUsername(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) string { + for _, subject := range workspaceRoleBinding.Subjects { + if subject.Kind == iamv1alpha2.ResourceKindUser { + return subject.Name + } + } + return "" +} + +func (c *Controller) createFederatedWorkspaceRoleBinding(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error { + federatedWorkspaceRoleBinding := &iamv1alpha2.FederatedRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: iamv1alpha2.FedWorkspaceRoleBindingKind, + APIVersion: iamv1alpha2.FedWorkspaceRoleBindingResource.Group + "/" + iamv1alpha2.FedWorkspaceRoleBindingResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: workspaceRoleBinding.Name, + }, + Spec: iamv1alpha2.FederatedRoleBindingSpec{ + Template: iamv1alpha2.RoleBindingTemplate{ + ObjectMeta: metav1.ObjectMeta{ + 
Labels: workspaceRoleBinding.Labels, + Annotations: workspaceRoleBinding.Annotations, + }, + Subjects: workspaceRoleBinding.Subjects, + RoleRef: workspaceRoleBinding.RoleRef, + }, + Placement: iamv1alpha2.Placement{ + ClusterSelector: iamv1alpha2.ClusterSelector{}, + }, + }, + } + + err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRoleBinding, scheme.Scheme) + + if err != nil { + return err + } + + data, err := json.Marshal(federatedWorkspaceRoleBinding) + + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group, + iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name)). + Body(data). + Do().Error() + + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedWorkspaceRoleBinding(federatedWorkspaceRoleBinding *iamv1alpha2.FederatedRoleBinding) error { + + data, err := json.Marshal(federatedWorkspaceRoleBinding) + + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + + err = cli.RESTClient().Put(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedWorkspaceRoleBindingResource.Group, + iamv1alpha2.FedWorkspaceRoleBindingResource.Version, iamv1alpha2.FedWorkspaceRoleBindingResource.Name, + federatedWorkspaceRoleBinding.Name)). + Body(data). 
+ Do().Error() + + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) ensureNotControlledByKubefed(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error { + if workspaceRoleBinding.Labels[constants.KubefedManagedLabel] != "false" { + if workspaceRoleBinding.Labels == nil { + workspaceRoleBinding.Labels = make(map[string]string, 0) + } + workspaceRoleBinding = workspaceRoleBinding.DeepCopy() + workspaceRoleBinding.Labels[constants.KubefedManagedLabel] = "false" + _, err := c.ksClient.IamV1alpha2().WorkspaceRoleBindings().Update(workspaceRoleBinding) + if err != nil { + klog.Error(err) + } + } + return nil +} + +func ensureSubjectAPIVersionIsValid(subjects []rbacv1.Subject) []rbacv1.Subject { + validSubjects := make([]rbacv1.Subject, 0) + for _, subject := range subjects { + if subject.Kind == iamv1alpha2.ResourceKindUser { + validSubject := rbacv1.Subject{ + Kind: iamv1alpha2.ResourceKindUser, + APIGroup: "rbac.authorization.k8s.io", + Name: subject.Name, + } + validSubjects = append(validSubjects, validSubject) + } + } + return validSubjects +} diff --git a/pkg/controller/workspacetemplate/workspacetemplate_controller.go b/pkg/controller/workspacetemplate/workspacetemplate_controller.go new file mode 100644 index 000000000..cc70a5817 --- /dev/null +++ b/pkg/controller/workspacetemplate/workspacetemplate_controller.go @@ -0,0 +1,494 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workspacetemplate + +import ( + "bytes" + "encoding/json" + "fmt" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" + kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2" + tenantv1alpha1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1" + tenantv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha2" + iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2" + tenantv1alpha1listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha1" + tenantv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha2" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "time" +) + +const ( + // SuccessSynced is used as part of the Event 'reason' when a Foo is synced + successSynced = "Synced" + // is synced successfully + messageResourceSynced = "WorkspaceTemplate synced successfully" + controllerName = "workspacetemplate-controller" +) + +type 
Controller struct { + k8sClient kubernetes.Interface + ksClient kubesphere.Interface + workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer + workspaceTemplateLister tenantv1alpha2listers.WorkspaceTemplateLister + workspaceTemplateSynced cache.InformerSynced + workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer + workspaceRoleLister iamv1alpha2listers.WorkspaceRoleLister + workspaceRoleSynced cache.InformerSynced + roleBaseInformer iamv1alpha2informers.RoleBaseInformer + roleBaseLister iamv1alpha2listers.RoleBaseLister + roleBaseSynced cache.InformerSynced + workspaceInformer tenantv1alpha1informers.WorkspaceInformer + workspaceLister tenantv1alpha1listers.WorkspaceLister + workspaceSynced cache.InformerSynced + fedWorkspaceCache cache.Store + fedWorkspaceCacheController cache.Controller + multiClusterEnabled bool + // workqueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + workqueue workqueue.RateLimitingInterface + // recorder is an event recorder for recording Event resources to the + // Kubernetes API. 
+ recorder record.EventRecorder +} + +func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, workspaceTemplateInformer tenantv1alpha2informers.WorkspaceTemplateInformer, + workspaceInformer tenantv1alpha1informers.WorkspaceInformer, roleBaseInformer iamv1alpha2informers.RoleBaseInformer, workspaceRoleInformer iamv1alpha2informers.WorkspaceRoleInformer, + fedWorkspaceCache cache.Store, fedWorkspaceCacheController cache.Controller, multiClusterEnabled bool) *Controller { + // Create event broadcaster + // Add sample-controller types to the default Kubernetes Scheme so Events can be + // logged for sample-controller types. + + klog.V(4).Info("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) + ctl := &Controller{ + k8sClient: k8sClient, + ksClient: ksClient, + workspaceTemplateInformer: workspaceTemplateInformer, + workspaceTemplateLister: workspaceTemplateInformer.Lister(), + workspaceTemplateSynced: workspaceTemplateInformer.Informer().HasSynced, + workspaceInformer: workspaceInformer, + workspaceLister: workspaceInformer.Lister(), + workspaceSynced: workspaceInformer.Informer().HasSynced, + workspaceRoleInformer: workspaceRoleInformer, + workspaceRoleLister: workspaceRoleInformer.Lister(), + workspaceRoleSynced: workspaceRoleInformer.Informer().HasSynced, + roleBaseInformer: roleBaseInformer, + roleBaseLister: roleBaseInformer.Lister(), + roleBaseSynced: roleBaseInformer.Informer().HasSynced, + fedWorkspaceCache: fedWorkspaceCache, + fedWorkspaceCacheController: fedWorkspaceCacheController, + multiClusterEnabled: multiClusterEnabled, + workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceTemplate"), + recorder: 
recorder, + } + klog.Info("Setting up event handlers") + workspaceTemplateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctl.enqueueClusterRole, + UpdateFunc: func(old, new interface{}) { + ctl.enqueueClusterRole(new) + }, + DeleteFunc: ctl.enqueueClusterRole, + }) + return ctl +} + +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.workqueue.ShutDown() + + // Start the informer factories to begin populating the informer caches + klog.Info("Starting GlobalRole controller") + + // Wait for the caches to be synced before starting workers + klog.Info("Waiting for informer caches to sync") + + synced := make([]cache.InformerSynced, 0) + synced = append(synced, c.workspaceTemplateSynced, c.workspaceSynced, c.workspaceRoleSynced, c.roleBaseSynced) + if c.multiClusterEnabled { + synced = append(synced, c.fedWorkspaceCacheController.HasSynced) + } + if ok := cache.WaitForCacheSync(stopCh, synced...); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + klog.Info("Starting workers") + // Launch two workers to process Foo resources + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + klog.Info("Started workers") + <-stopCh + klog.Info("Shutting down workers") + return nil +} + +func (c *Controller) enqueueClusterRole(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + c.workqueue.Add(key) +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *Controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + // We wrap this block in a func so we can defer c.workqueue.Done. + err := func(obj interface{}) error { + // We call Done here so the workqueue knows we have finished + // processing this item. 
We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.workqueue.Done(obj) + var key string + var ok bool + // We expect strings to come off the workqueue. These are of the + // form namespace/name. We do this as the delayed nature of the + // workqueue means the items in the informer cache may actually be + // more up to date that when the item was initially put onto the + // workqueue. + if key, ok = obj.(string); !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. + c.workqueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + // Run the reconcile, passing it the namespace/name string of the + // Foo resource to be synced. + if err := c.reconcile(key); err != nil { + // Put the item back on the workqueue to handle any transient errors. + c.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.workqueue.Forget(obj) + klog.Infof("Successfully synced %s:%s", "key", key) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + + return true +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Foo resource +// with the current status of the resource. +func (c *Controller) reconcile(key string) error { + + workspaceTemplate, err := c.workspaceTemplateLister.Get(key) + if err != nil { + // The user may no longer exist, in which case we stop + // processing. 
+ if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("workspace template '%s' in work queue no longer exists", key)) + return nil + } + klog.Error(err) + return err + } + + if err = c.initRoles(workspaceTemplate); err != nil { + klog.Error(err) + return err + } + + if c.multiClusterEnabled { + if err = c.multiClusterSync(workspaceTemplate); err != nil { + klog.Error(err) + return err + } + } else { + if err = c.sync(workspaceTemplate); err != nil { + klog.Error(err) + return err + } + } + + c.recorder.Event(workspaceTemplate, corev1.EventTypeNormal, successSynced, messageResourceSynced) + return nil +} + +func (c *Controller) Start(stopCh <-chan struct{}) error { + return c.Run(4, stopCh) +} + +func (c *Controller) multiClusterSync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error { + + obj, exist, err := c.fedWorkspaceCache.GetByKey(workspaceTemplate.Name) + if !exist { + return c.createFederatedWorkspace(workspaceTemplate) + } + if err != nil { + klog.Error(err) + return err + } + + var fedWorkspace tenantv1alpha2.FederatedWorkspace + + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &fedWorkspace); err != nil { + klog.Error(err) + return err + } + + if !reflect.DeepEqual(fedWorkspace.Spec.Template.Spec, workspaceTemplate.Spec.WorkspaceSpec) || + !reflect.DeepEqual(fedWorkspace.Labels, workspaceTemplate.Labels) || + !reflect.DeepEqual(fedWorkspace.Annotations, workspaceTemplate.Annotations) || + !reflect.DeepEqual(fedWorkspace.Spec.Overrides, workspaceTemplate.Spec.Overrides) { + + fedWorkspace.Spec.Template.Spec = workspaceTemplate.Spec.WorkspaceSpec + fedWorkspace.Annotations = workspaceTemplate.Annotations + fedWorkspace.Labels = workspaceTemplate.Labels + fedWorkspace.Spec.Overrides = workspaceTemplate.Spec.Overrides + + return c.updateFederatedWorkspace(&fedWorkspace) + } + + return nil +} + +func (c *Controller) createFederatedWorkspace(workspaceTemplate 
*tenantv1alpha2.WorkspaceTemplate) error { + clusters := make([]tenantv1alpha2.Cluster, 0) + for _, cluster := range workspaceTemplate.Spec.Clusters { + clusters = append(clusters, tenantv1alpha2.Cluster{Name: cluster}) + } + + federatedWorkspace := &tenantv1alpha2.FederatedWorkspace{ + TypeMeta: metav1.TypeMeta{ + Kind: tenantv1alpha2.FedWorkspaceKind, + APIVersion: tenantv1alpha2.FedWorkspaceResource.Group + "/" + tenantv1alpha2.FedWorkspaceResource.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: workspaceTemplate.Name, + }, + Spec: tenantv1alpha2.FederatedWorkspaceSpec{ + Template: tenantv1alpha2.Template{ + ObjectMeta: metav1.ObjectMeta{ + Labels: workspaceTemplate.Labels, + Annotations: workspaceTemplate.Annotations, + }, + Spec: workspaceTemplate.Spec.WorkspaceSpec, + }, + Placement: tenantv1alpha2.Placement{ + Clusters: clusters, + }, + Overrides: workspaceTemplate.Spec.Overrides, + }, + } + + err := controllerutil.SetControllerReference(workspaceTemplate, federatedWorkspace, scheme.Scheme) + if err != nil { + return err + } + + data, err := json.Marshal(federatedWorkspace) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Post(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s", tenantv1alpha2.FedWorkspaceResource.Group, + tenantv1alpha2.FedWorkspaceResource.Version, tenantv1alpha2.FedWorkspaceResource.Name)). + Body(data). + Do().Error() + + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) updateFederatedWorkspace(fedWorkspace *tenantv1alpha2.FederatedWorkspace) error { + + data, err := json.Marshal(fedWorkspace) + if err != nil { + return err + } + + cli := c.k8sClient.(*kubernetes.Clientset) + err = cli.RESTClient().Put(). + AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", tenantv1alpha2.FedWorkspaceResource.Group, + tenantv1alpha2.FedWorkspaceResource.Version, tenantv1alpha2.FedWorkspaceResource.Name, + fedWorkspace.Name)). 
+ Body(data). + Do().Error() + + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + + return nil +} + +func (c *Controller) sync(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error { + workspace, err := c.workspaceLister.Get(workspaceTemplate.Name) + if err != nil { + if errors.IsNotFound(err) { + return c.createWorkspace(workspaceTemplate) + } + klog.Error(err) + return err + } + + if !reflect.DeepEqual(workspace.Spec, workspaceTemplate.Spec.WorkspaceSpec) || + !reflect.DeepEqual(workspace.Labels, workspaceTemplate.Labels) || + !reflect.DeepEqual(workspace.Annotations, workspaceTemplate.Annotations) { + + workspace = workspace.DeepCopy() + workspace.Spec = workspaceTemplate.Spec.WorkspaceSpec + workspace.Annotations = workspaceTemplate.Annotations + workspace.Labels = workspaceTemplate.Labels + + return c.updateWorkspace(workspace) + } + + return nil +} + +func (c *Controller) createWorkspace(workspaceTemplate *tenantv1alpha2.WorkspaceTemplate) error { + workspace := &tenantv1alpha1.Workspace{ + ObjectMeta: metav1.ObjectMeta{ + Name: workspaceTemplate.Name, + Labels: workspaceTemplate.Labels, + Annotations: workspaceTemplate.Annotations, + }, + Spec: workspaceTemplate.Spec.WorkspaceSpec, + } + + err := controllerutil.SetControllerReference(workspaceTemplate, workspace, scheme.Scheme) + if err != nil { + return err + } + + _, err = c.ksClient.TenantV1alpha1().Workspaces().Create(workspace) + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + klog.Error(err) + return err + } + + return nil +} + +func (c *Controller) updateWorkspace(workspace *tenantv1alpha1.Workspace) error { + _, err := c.ksClient.TenantV1alpha1().Workspaces().Update(workspace) + if err != nil { + klog.Error(err) + return err + } + return nil +} + +func (r *Controller) initRoles(workspace *tenantv1alpha2.WorkspaceTemplate) error { + roleBases, err := r.roleBaseLister.List(labels.Everything()) + if err != nil { + klog.Error(err) + return err + 
} + + for _, roleBase := range roleBases { + var role iamv1alpha2.WorkspaceRole + if err = yaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(roleBase.Role.Raw), 1024).Decode(&role); err == nil { + old, err := r.workspaceRoleLister.Get(fmt.Sprintf("%s-%s", workspace.Name, role.Name)) + if err != nil { + if errors.IsNotFound(err) { + role.Name = fmt.Sprintf("%s-%s", workspace.Name, role.Name) + if role.Labels == nil { + role.Labels = make(map[string]string, 0) + } + role.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name + _, err = r.ksClient.IamV1alpha2().WorkspaceRoles().Create(&role) + if err != nil { + klog.Error(err) + return err + } + continue + } + } + + if !reflect.DeepEqual(role.Annotations, old.Annotations) || + !reflect.DeepEqual(role.Rules, old.Rules) { + updated := old.DeepCopy() + updated.Annotations = role.Annotations + updated.Rules = role.Rules + + _, err = r.ksClient.IamV1alpha2().WorkspaceRoles().Update(updated) + if err != nil { + klog.Error(err) + return err + } + } + } + } + return nil +} diff --git a/pkg/kapis/resources/v1alpha2/handler.go b/pkg/kapis/resources/v1alpha2/handler.go index f4f576487..f30b9b68d 100644 --- a/pkg/kapis/resources/v1alpha2/handler.go +++ b/pkg/kapis/resources/v1alpha2/handler.go @@ -48,7 +48,7 @@ func newResourceHandler(k8sClient kubernetes.Interface, factory informers.Inform routerOperator: routers.NewRouterOperator(k8sClient, factory.KubernetesSharedInformerFactory()), gitVerifier: git.NewGitVerifier(factory.KubernetesSharedInformerFactory()), registryGetter: registries.NewRegistryGetter(factory.KubernetesSharedInformerFactory()), - kubeconfigOperator: kubeconfig.NewOperator(k8sClient, nil, masterURL), + kubeconfigOperator: kubeconfig.NewReadOnlyOperator(factory.KubernetesSharedInformerFactory().Core().V1().ConfigMaps(), masterURL), kubectlOperator: kubectl.NewOperator(nil, factory.KubernetesSharedInformerFactory().Apps().V1().Deployments(), factory.KubernetesSharedInformerFactory().Core().V1().Pods(), 
factory.KubeSphereSharedInformerFactory().Iam().V1alpha2().Users()), diff --git a/pkg/kapis/tenant/v1alpha2/handler.go b/pkg/kapis/tenant/v1alpha2/handler.go index 45c5183fb..0ed5134e0 100644 --- a/pkg/kapis/tenant/v1alpha2/handler.go +++ b/pkg/kapis/tenant/v1alpha2/handler.go @@ -457,3 +457,26 @@ func (h *tenantHandler) PatchWorkspace(request *restful.Request, response *restf response.WriteEntity(patched) } + +func (h *tenantHandler) ListClusters(r *restful.Request, response *restful.Response) { + user, ok := request.UserFrom(r.Request.Context()) + + if !ok { + response.WriteEntity([]interface{}{}) + return + } + + result, err := h.tenant.ListClusters(user) + + if err != nil { + klog.Error(err) + if errors.IsNotFound(err) { + api.HandleNotFound(response, r, err) + return + } + api.HandleInternalError(response, r, err) + return + } + + response.WriteEntity(result) +} diff --git a/pkg/kapis/tenant/v1alpha2/register.go b/pkg/kapis/tenant/v1alpha2/register.go index 48ab479ce..9ce3ca7b1 100644 --- a/pkg/kapis/tenant/v1alpha2/register.go +++ b/pkg/kapis/tenant/v1alpha2/register.go @@ -51,6 +51,11 @@ func AddToContainer(c *restful.Container, factory informers.InformerFactory, k8s ws := runtime.NewWebService(GroupVersion) handler := newTenantHandler(factory, k8sclient, ksclient, evtsClient, loggingClient, auditingclient) + ws.Route(ws.GET("/clusters"). + To(handler.ListClusters). + Doc("List clusters available to users"). + Returns(http.StatusOK, api.StatusOK, api.ListResult{}). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag})) ws.Route(ws.POST("/workspaces"). To(handler.CreateWorkspace). Reads(tenantv1alpha2.WorkspaceTemplate{}). @@ -82,12 +87,12 @@ func AddToContainer(c *restful.Container, factory informers.InformerFactory, k8s Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag})) ws.Route(ws.GET("/workspaces/{workspace}"). To(handler.DescribeWorkspace). 
- Returns(http.StatusOK, api.StatusOK, models.PageableResponse{}). + Returns(http.StatusOK, api.StatusOK, tenantv1alpha2.WorkspaceTemplate{}). Doc("Describe workspace."). Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag})) ws.Route(ws.GET("/workspaces/{workspace}/clusters"). To(handler.ListWorkspaceClusters). - Returns(http.StatusOK, api.StatusOK, models.PageableResponse{}). + Returns(http.StatusOK, api.StatusOK, api.ListResult{}). Doc("List clusters authorized to the specified workspace."). Metadata(restfulspec.KeyOpenAPITags, []string{constants.TenantResourcesTag})) diff --git a/pkg/models/iam/am/am.go b/pkg/models/iam/am/am.go index 85e0e5aea..2c15fb32e 100644 --- a/pkg/models/iam/am/am.go +++ b/pkg/models/iam/am/am.go @@ -333,9 +333,9 @@ func (am *amOperator) GetGlobalRole(globalRole string) (*iamv1alpha2.GlobalRole, return obj.(*iamv1alpha2.GlobalRole), nil } -func (am *amOperator) CreateGlobalRoleBinding(username string, globalRole string) error { +func (am *amOperator) CreateGlobalRoleBinding(username string, role string) error { - _, err := am.GetGlobalRole(globalRole) + _, err := am.GetGlobalRole(role) if err != nil { klog.Error(err) @@ -350,7 +350,7 @@ func (am *amOperator) CreateGlobalRoleBinding(username string, globalRole string } for _, roleBinding := range roleBindings { - if globalRole == roleBinding.RoleRef.Name { + if role == roleBinding.RoleRef.Name { return nil } err := am.ksclient.IamV1alpha2().GlobalRoleBindings().Delete(roleBinding.Name, metav1.NewDeleteOptions(0)) @@ -365,7 +365,7 @@ func (am *amOperator) CreateGlobalRoleBinding(username string, globalRole string globalRoleBinding := iamv1alpha2.GlobalRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", username, globalRole), + Name: fmt.Sprintf("%s-%s", username, role), Labels: map[string]string{iamv1alpha2.UserReferenceLabel: username}, }, Subjects: []rbacv1.Subject{ @@ -378,7 +378,7 @@ func (am *amOperator) CreateGlobalRoleBinding(username 
string, globalRole string RoleRef: rbacv1.RoleRef{ APIGroup: iamv1alpha2.SchemeGroupVersion.Group, Kind: iamv1alpha2.ResourceKindGlobalRole, - Name: globalRole, + Name: role, }, } @@ -456,7 +456,7 @@ func (am *amOperator) CreateWorkspaceRoleBinding(username string, workspace stri roleBinding := iamv1alpha2.WorkspaceRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", role, username), + Name: fmt.Sprintf("%s-%s", username, role), Labels: map[string]string{iamv1alpha2.UserReferenceLabel: username, tenantv1alpha1.WorkspaceLabel: workspace}, }, diff --git a/pkg/models/iam/im/im.go b/pkg/models/iam/im/im.go index 975f3ed47..3f1607165 100644 --- a/pkg/models/iam/im/im.go +++ b/pkg/models/iam/im/im.go @@ -131,10 +131,7 @@ func (im *defaultIMOperator) ListUsers(query *query.Query) (result *api.ListResu for _, item := range result.Items { user := item.(*iamv1alpha2.User) - out := user.DeepCopy() - // ensure encrypted password will not be output - out.Spec.EncryptedPassword = "" - items = append(items, out) + items = append(items, ensurePasswordNotOutput(user)) } result.Items = items @@ -156,11 +153,8 @@ func (im *defaultIMOperator) DescribeUser(username string) (*iamv1alpha2.User, e } user := obj.(*iamv1alpha2.User) - out := user.DeepCopy() - // ensure encrypted password will not be output - out.Spec.EncryptedPassword = "" - return out, nil + return ensurePasswordNotOutput(user), nil } func (im *defaultIMOperator) DeleteUser(username string) error { @@ -175,3 +169,10 @@ func (im *defaultIMOperator) CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.Us } return user, nil } + +func ensurePasswordNotOutput(user *iamv1alpha2.User) *iamv1alpha2.User { + out := user.DeepCopy() + // ensure encrypted password will not be output + out.Spec.EncryptedPassword = "" + return out +} diff --git a/pkg/models/kubeconfig/kubeconfig.go b/pkg/models/kubeconfig/kubeconfig.go index d4b8b0005..ecb1e5f5b 100644 --- a/pkg/models/kubeconfig/kubeconfig.go +++ 
b/pkg/models/kubeconfig/kubeconfig.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/user" + corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -60,30 +61,38 @@ type Interface interface { } type operator struct { - k8sclient kubernetes.Interface - config *rest.Config - masterURL string + k8sClient kubernetes.Interface + configMapInformer corev1informers.ConfigMapInformer + config *rest.Config + masterURL string } -func NewOperator(k8sclient kubernetes.Interface, config *rest.Config, masterURL string) Interface { - return &operator{k8sclient: k8sclient, config: config, masterURL: masterURL} +func NewOperator(k8sClient kubernetes.Interface, configMapInformer corev1informers.ConfigMapInformer, config *rest.Config) Interface { + return &operator{k8sClient: k8sClient, configMapInformer: configMapInformer, config: config} +} + +func NewReadOnlyOperator(configMapInformer corev1informers.ConfigMapInformer, masterURL string) Interface { + return &operator{configMapInformer: configMapInformer, masterURL: masterURL} } func (o *operator) CreateKubeConfig(user *iamv1alpha2.User) error { configName := fmt.Sprintf(kubeconfigNameFormat, user.Name) - _, err := o.k8sclient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName, metav1.GetOptions{}) + _, err := o.configMapInformer.Lister().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName) + // already exist if err == nil { return nil } + // internal error if !errors.IsNotFound(err) { klog.Error(err) return err } + // create if not exist var ca []byte if len(o.config.CAData) > 0 { ca = o.config.CAData @@ -142,7 +151,7 @@ func (o *operator) CreateKubeConfig(user *iamv1alpha2.User) error { return err } - _, err = o.k8sclient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Create(cm) + _, err = 
o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Create(cm) if err != nil { klog.Errorln(err) @@ -154,7 +163,7 @@ func (o *operator) CreateKubeConfig(user *iamv1alpha2.User) error { func (o *operator) GetKubeConfig(username string) (string, error) { configName := fmt.Sprintf(kubeconfigNameFormat, username) - configMap, err := o.k8sclient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName, metav1.GetOptions{}) + configMap, err := o.configMapInformer.Lister().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName) if err != nil { klog.Errorln(err) return "", err @@ -171,6 +180,7 @@ func (o *operator) GetKubeConfig(username string) (string, error) { masterURL := o.masterURL + // server host override if cluster := kubeconfig.Clusters[defaultClusterName]; cluster != nil { cluster.Server = masterURL } @@ -244,7 +254,7 @@ func (o *operator) createCSR(username string) ([]byte, error) { } // create csr - k8sCSR, err = o.k8sclient.CertificatesV1beta1().CertificateSigningRequests().Create(k8sCSR) + k8sCSR, err = o.k8sClient.CertificatesV1beta1().CertificateSigningRequests().Create(k8sCSR) if err != nil { klog.Errorln(err) @@ -256,14 +266,14 @@ func (o *operator) createCSR(username string) ([]byte, error) { func (o *operator) UpdateKubeconfig(username string, certificate []byte) error { configName := fmt.Sprintf(kubeconfigNameFormat, username) - configMap, err := o.k8sclient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName, metav1.GetOptions{}) + configMap, err := o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(configName, metav1.GetOptions{}) if err != nil { klog.Errorln(err) return err } configMap = appendCert(configMap, certificate) - _, err = o.k8sclient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Update(configMap) + _, err = o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Update(configMap) if err != nil { klog.Errorln(err) return err 
diff --git a/pkg/models/tenant/tenant.go b/pkg/models/tenant/tenant.go index d56fec2d6..19c915d4f 100644 --- a/pkg/models/tenant/tenant.go +++ b/pkg/models/tenant/tenant.go @@ -73,6 +73,7 @@ type Interface interface { UpdateNamespace(workspace string, namespace *corev1.Namespace) (*corev1.Namespace, error) PatchNamespace(workspace string, namespace *corev1.Namespace) (*corev1.Namespace, error) PatchWorkspace(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) + ListClusters(info user.Info) (*api.ListResult, error) } type tenantOperator struct { @@ -355,6 +356,90 @@ func (t *tenantOperator) ListWorkspaceClusters(workspaceName string) (*api.ListR } return &api.ListResult{Items: clusters, TotalItems: len(clusters)}, nil } +func (t *tenantOperator) ListClusters(user user.Info) (*api.ListResult, error) { + + listClustersInGlobalScope := authorizer.AttributesRecord{ + User: user, + Verb: "list", + Resource: "clusters", + ResourceScope: request.GlobalScope, + ResourceRequest: true, + } + + allowedListClustersInGlobalScope, _, err := t.authorizer.Authorize(listClustersInGlobalScope) + + if err != nil { + klog.Error(err) + return nil, err + } + + listWorkspacesInGlobalScope := authorizer.AttributesRecord{ + User: user, + Verb: "list", + Resource: "workspaces", + ResourceScope: request.GlobalScope, + ResourceRequest: true, + } + + allowedListWorkspacesInGlobalScope, _, err := t.authorizer.Authorize(listWorkspacesInGlobalScope) + + if err != nil { + klog.Error(err) + return nil, err + } + + if allowedListClustersInGlobalScope == authorizer.DecisionAllow || + allowedListWorkspacesInGlobalScope == authorizer.DecisionAllow { + result, err := t.resourceGetter.List(clusterv1alpha1.ResourcesPluralCluster, "", query.New()) + if err != nil { + klog.Error(err) + return nil, err + } + return result, nil + } + + workspaceRoleBindings, err := t.am.ListWorkspaceRoleBindings(user.GetName(), "") + + if err != nil { + klog.Error(err) + return nil, err + } 
+ + clusters := map[string]*clusterv1alpha1.Cluster{} + + for _, roleBinding := range workspaceRoleBindings { + workspaceName := roleBinding.Labels[tenantv1alpha1.WorkspaceLabel] + workspace, err := t.DescribeWorkspace(workspaceName) + if err != nil { + klog.Error(err) + return nil, err + } + + for _, clusterName := range workspace.Spec.Clusters { + // skip if cluster exist + if clusters[clusterName] != nil { + continue + } + obj, err := t.resourceGetter.Get(clusterv1alpha1.ResourcesPluralCluster, "", clusterName) + if err != nil { + klog.Error(err) + if errors.IsNotFound(err) { + continue + } + return nil, err + } + cluster := obj.(*clusterv1alpha1.Cluster) + clusters[clusterName] = cluster + } + } + + items := make([]interface{}, 0) + for _, cluster := range clusters { + items = append(items, cluster) + } + + return &api.ListResult{Items: items, TotalItems: len(items)}, nil +} func (t *tenantOperator) DeleteWorkspace(workspace string) error { return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Delete(workspace, metav1.NewDeleteOptions(0))