improve multicluster resource controller
Signed-off-by: hongming <talonwan@yunify.com>
@@ -17,12 +17,17 @@ limitations under the License.
 package user
 
 import (
+	"encoding/json"
 	"fmt"
 	"golang.org/x/crypto/bcrypt"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	corev1informers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -36,7 +41,10 @@ import (
 	kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
 	userinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
 	userlister "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
+	"kubesphere.io/kubesphere/pkg/constants"
 	"kubesphere.io/kubesphere/pkg/models/kubeconfig"
+	"reflect"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"strconv"
 	"time"
 )
@@ -50,12 +58,15 @@ const (
 )
 
 type Controller struct {
-	k8sClient    kubernetes.Interface
-	ksClient     kubesphere.Interface
-	kubeconfig   kubeconfig.Interface
-	userInformer userinformer.UserInformer
-	userLister   userlister.UserLister
-	userSynced   cache.InformerSynced
+	k8sClient         kubernetes.Interface
+	ksClient          kubesphere.Interface
+	kubeconfig        kubeconfig.Interface
+	userInformer      userinformer.UserInformer
+	userLister        userlister.UserLister
+	userSynced        cache.InformerSynced
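+	// cmSynced reports whether the configmap informer used by the kubeconfig
+	// operator has synced; fedUserCache and fedUserController back the
+	// FederatedUser watch started in multi-cluster mode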
+	cmSynced          cache.InformerSynced
+	fedUserCache      cache.Store
+	fedUserController cache.Controller
 	// workqueue is a rate limited work queue. This is used to queue work to be
 	// processed instead of performing it as soon as a change happens. This
 	// means we can ensure we only process a fixed amount of resources at a
@@ -64,11 +75,13 @@ type Controller struct {
 	workqueue workqueue.RateLimitingInterface
 	// recorder is an event recorder for recording Event resources to the
 	// Kubernetes API.
-	recorder record.EventRecorder
+	recorder            record.EventRecorder
+	multiClusterEnabled bool
 }
 
 func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
-	config *rest.Config, userInformer userinformer.UserInformer) *Controller {
+	config *rest.Config, userInformer userinformer.UserInformer, fedUserCache cache.Store, fedUserController cache.Controller,
+	configMapInformer corev1informers.ConfigMapInformer, multiClusterEnabled bool) *Controller {
 	// Create event broadcaster
 	// Add sample-controller types to the default Kubernetes Scheme so Events can be
 	// logged for sample-controller types.
@@ -81,17 +94,21 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
 	var kubeconfigOperator kubeconfig.Interface
 	if config != nil {
-		kubeconfigOperator = kubeconfig.NewOperator(k8sClient, config, "")
+		kubeconfigOperator = kubeconfig.NewOperator(k8sClient, configMapInformer, config)
 	}
 	ctl := &Controller{
-		k8sClient:    k8sClient,
-		ksClient:     ksClient,
-		kubeconfig:   kubeconfigOperator,
-		userInformer: userInformer,
-		userLister:   userInformer.Lister(),
-		userSynced:   userInformer.Informer().HasSynced,
-		workqueue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
-		recorder:     recorder,
+		k8sClient:           k8sClient,
+		ksClient:            ksClient,
+		kubeconfig:          kubeconfigOperator,
+		userInformer:        userInformer,
+		userLister:          userInformer.Lister(),
+		userSynced:          userInformer.Informer().HasSynced,
+		cmSynced:            configMapInformer.Informer().HasSynced,
+		fedUserCache:        fedUserCache,
+		fedUserController:   fedUserController,
+		workqueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
+		recorder:            recorder,
+		multiClusterEnabled: multiClusterEnabled,
 	}
 	klog.Info("Setting up event handlers")
 	userInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -108,14 +125,19 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
 	defer utilruntime.HandleCrash()
 	defer c.workqueue.ShutDown()
 
-	//init client
-
 	// Start the informer factories to begin populating the informer caches
 	klog.Info("Starting User controller")
 
 	// Wait for the caches to be synced before starting workers
 	klog.Info("Waiting for informer caches to sync")
-	if ok := cache.WaitForCacheSync(stopCh, c.userSynced); !ok {
+
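+	// every cache listed here must be synced before workers start; the
+	// federated user controller only runs when multi-cluster is enabled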
+	synced := make([]cache.InformerSynced, 0)
+	synced = append(synced, c.userSynced, c.cmSynced)
+	if c.multiClusterEnabled {
+		synced = append(synced, c.fedUserController.HasSynced)
+	}
+
+	if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}
 
@@ -217,16 +239,22 @@ func (c *Controller) reconcile(key string) error {
 		return err
 	}
 
-	user, err = c.encryptPassword(user.DeepCopy())
-
-	if err != nil {
+	if user, err = c.ensurePasswordIsEncrypted(user); err != nil {
 		klog.Error(err)
 		return err
 	}
 
 	if c.kubeconfig != nil {
-		err = c.kubeconfig.CreateKubeConfig(user)
-		if err != nil {
+		// ensure user kubeconfig configmap is created
+		if err = c.kubeconfig.CreateKubeConfig(user); err != nil {
 			klog.Error(err)
 			return err
 		}
 	}
 
+	// synchronization through kubefed-controller when multi cluster is enabled
+	if c.multiClusterEnabled {
+		if err = c.multiClusterSync(user); err != nil {
+			klog.Error(err)
+			return err
+		}
@@ -240,9 +268,8 @@ func (c *Controller) Start(stopCh <-chan struct{}) error {
 	return c.Run(4, stopCh)
 }
 
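+// ensurePasswordIsEncrypted hashes the plaintext password once and marks the
+// user with the PasswordEncryptedAnnotation so it is not hashed again on the
+// next reconcile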
-func (c *Controller) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
+func (c *Controller) ensurePasswordIsEncrypted(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
 	encrypted, err := strconv.ParseBool(user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation])
-
 	// password is not encrypted
 	if err != nil || !encrypted {
 		password, err := encrypt(user.Spec.EncryptedPassword)
@@ -250,21 +277,148 @@ func (c *Controller) encryptPassword(user *iamv1alpha2.User,
 			klog.Error(err)
 			return nil, err
 		}
+		user = user.DeepCopy()
 		user.Spec.EncryptedPassword = password
 		if user.Annotations == nil {
 			user.Annotations = make(map[string]string, 0)
 		}
 
 		user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] = "true"
-
-		updated, err := c.ksClient.IamV1alpha2().Users().Update(user)
-
-		return updated, err
+		user.Status.State = iamv1alpha2.UserActive
+		return c.ksClient.IamV1alpha2().Users().Update(user)
 	}
 
 	return user, nil
 }
+
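+// ensureNotControlledByKubefed sets the kubefed managed label to "false" so
+// that kubefed does not take over the user object in the host cluster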
+func (c *Controller) ensureNotControlledByKubefed(user *iamv1alpha2.User) error {
+	if user.Labels[constants.KubefedManagedLabel] != "false" {
+		// work on a copy, the shared informer cache object must not be mutated
+		user = user.DeepCopy()
+		if user.Labels == nil {
+			user.Labels = make(map[string]string)
+		}
+		user.Labels[constants.KubefedManagedLabel] = "false"
+		_, err := c.ksClient.IamV1alpha2().Users().Update(user)
+		if err != nil {
+			klog.Error(err)
+			return err
+		}
+	}
+	return nil
+}
+
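+// multiClusterSync creates the corresponding FederatedUser if it does not
+// exist yet, and updates it whenever spec, status, labels or annotations drift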
+func (c *Controller) multiClusterSync(user *iamv1alpha2.User) error {
+	if err := c.ensureNotControlledByKubefed(user); err != nil {
+		klog.Error(err)
+		return err
+	}
+
+	obj, exist, err := c.fedUserCache.GetByKey(user.Name)
+	if err != nil {
+		klog.Error(err)
+		return err
+	}
+	if !exist {
+		return c.createFederatedUser(user)
+	}
+
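+	// the cache stores unstructured objects; convert back to a typed FederatedUser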
+	var federatedUser iamv1alpha2.FederatedUser
+	if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.(*unstructured.Unstructured).Object, &federatedUser); err != nil {
+		klog.Error(err)
+		return err
+	}
+
+	// compare exactly the fields that are rewritten below, so the update converges
+	if !reflect.DeepEqual(federatedUser.Spec.Template.Spec, user.Spec) ||
+		!reflect.DeepEqual(federatedUser.Spec.Template.Status, user.Status) ||
+		!reflect.DeepEqual(federatedUser.Spec.Template.Labels, user.Labels) ||
+		!reflect.DeepEqual(federatedUser.Spec.Template.Annotations, user.Annotations) {
+
+		federatedUser.Labels = user.Labels
+		federatedUser.Spec.Template.Spec = user.Spec
+		federatedUser.Spec.Template.Status = user.Status
+		federatedUser.Spec.Template.Labels = user.Labels
+		federatedUser.Spec.Template.Annotations = user.Annotations
+		return c.updateFederatedUser(&federatedUser)
+	}
+
+	return nil
+}
+
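+// createFederatedUser builds a FederatedUser with an empty placement cluster
+// selector, so the user is propagated to all member clusters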
+func (c *Controller) createFederatedUser(user *iamv1alpha2.User) error {
+	federatedUser := &iamv1alpha2.FederatedUser{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       iamv1alpha2.FedUserKind,
+			APIVersion: iamv1alpha2.FedUserResource.Group + "/" + iamv1alpha2.FedUserResource.Version,
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: user.Name,
+		},
+		Spec: iamv1alpha2.FederatedUserSpec{
+			Template: iamv1alpha2.UserTemplate{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:      user.Labels,
+					Annotations: user.Annotations,
+				},
+				Spec:   user.Spec,
+				Status: user.Status,
+			},
+			Placement: iamv1alpha2.Placement{
+				ClusterSelector: iamv1alpha2.ClusterSelector{},
+			},
+		},
+	}
+
+	// bind the lifecycle of the federated user to the user itself,
+	// so it is garbage collected when the user is deleted
+	err := controllerutil.SetControllerReference(user, federatedUser, scheme.Scheme)
+	if err != nil {
+		return err
+	}
+
+	data, err := json.Marshal(federatedUser)
+	if err != nil {
+		return err
+	}
+
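+	// use the underlying REST client to create the resource against the
+	// federated API group directly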
+	cli := c.k8sClient.(*kubernetes.Clientset)
+
+	err = cli.RESTClient().Post().
+		AbsPath(fmt.Sprintf("/apis/%s/%s/%s", iamv1alpha2.FedUserResource.Group,
+			iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name)).
+		Body(data).
+		Do().Error()
+	if err != nil {
+		if errors.IsAlreadyExists(err) {
+			return nil
+		}
+		return err
+	}
+
+	return nil
+}
+
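+// updateFederatedUser replaces the FederatedUser through the federated API;
+// NotFound is tolerated because the resource may already be gone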
+func (c *Controller) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) error {
+	data, err := json.Marshal(fedUser)
+	if err != nil {
+		return err
+	}
+
+	cli := c.k8sClient.(*kubernetes.Clientset)
+
+	err = cli.RESTClient().Put().
+		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedUserResource.Group,
+			iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name, fedUser.Name)).
+		Body(data).
+		Do().Error()
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	return nil
+}
+
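+// encrypt hashes the plaintext password with bcrypt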
 func encrypt(password string) (string, error) {
 	// when the user is already mapped to another identity, the password is
 	// empty by default and direct login is disabled until it is reset