Merge pull request #3140 from wansir/identity-provider
improve identity provider plugin
162
pkg/controller/loginrecord/loginrecord_controller.go
Normal file
@@ -0,0 +1,162 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package loginrecord

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"

	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
	"kubesphere.io/kubesphere/pkg/controller/utils/controller"
)

const (
	// successSynced is used as part of the Event 'reason' when a LoginRecord is synced
	successSynced = "Synced"
	// messageResourceSynced is the message used for an Event fired when a LoginRecord
	// is synced successfully
	messageResourceSynced = "LoginRecord synced successfully"
	controllerName        = "loginrecord-controller"
)

type loginRecordController struct {
	controller.BaseController
	k8sClient                   kubernetes.Interface
	ksClient                    kubesphere.Interface
	loginRecordLister           iamv1alpha2listers.LoginRecordLister
	loginRecordSynced           cache.InformerSynced
	userLister                  iamv1alpha2listers.UserLister
	userSynced                  cache.InformerSynced
	loginHistoryRetentionPeriod time.Duration
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

func NewLoginRecordController(k8sClient kubernetes.Interface,
	ksClient kubesphere.Interface,
	loginRecordInformer iamv1alpha2informers.LoginRecordInformer,
	userInformer iamv1alpha2informers.UserInformer,
	loginHistoryRetentionPeriod time.Duration) *loginRecordController {

	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName})
	ctl := &loginRecordController{
		BaseController: controller.BaseController{
			Workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "LoginRecords"),
			Synced:    []cache.InformerSynced{loginRecordInformer.Informer().HasSynced, userInformer.Informer().HasSynced},
			Name:      controllerName,
		},
		k8sClient:                   k8sClient,
		ksClient:                    ksClient,
		loginRecordLister:           loginRecordInformer.Lister(),
		userLister:                  userInformer.Lister(),
		loginHistoryRetentionPeriod: loginHistoryRetentionPeriod,
		recorder:                    recorder,
	}
	ctl.Handler = ctl.reconcile
	klog.Info("Setting up event handlers")
	loginRecordInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.Enqueue,
		UpdateFunc: func(old, new interface{}) {
			ctl.Enqueue(new)
		},
		DeleteFunc: ctl.Enqueue,
	})
	return ctl
}

func (c *loginRecordController) Start(stopCh <-chan struct{}) error {
	return c.Run(5, stopCh)
}

func (c *loginRecordController) reconcile(key string) error {
	loginRecord, err := c.loginRecordLister.Get(key)
	if err != nil {
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("login record '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	if !loginRecord.ObjectMeta.DeletionTimestamp.IsZero() {
		// The object is being deleted; there is nothing left for the
		// reconciler to do.
		return nil
	}

	if err = c.updateUserLastLoginTime(loginRecord); err != nil {
		return err
	}

	now := time.Now()
	// delete the login record once it is beyond the retention period
	if loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Before(now) {
		if err = c.ksClient.IamV1alpha2().LoginRecords().Delete(loginRecord.Name, metav1.NewDeleteOptions(0)); err != nil {
			klog.Error(err)
			return err
		}
	} else { // put the item back into the queue until it expires
		c.Workqueue.AddAfter(key, loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Sub(now))
	}
	c.recorder.Event(loginRecord, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

// updateUserLastLoginTime accepts a login record and sets the referenced
// user's lastLoginTime field
func (c *loginRecordController) updateUserLastLoginTime(loginRecord *iamv1alpha2.LoginRecord) error {
	username, ok := loginRecord.Labels[iamv1alpha2.UserReferenceLabel]
	if !ok || len(username) == 0 {
		klog.V(4).Info("login record doesn't belong to any user")
		return nil
	}
	user, err := c.userLister.Get(username)
	if err != nil {
		// ignore not found error
		if errors.IsNotFound(err) {
			klog.V(4).Infof("user %s doesn't exist any more, login record will be deleted later", username)
			return nil
		}
		klog.Error(err)
		return err
	}
	// update lastLoginTime
	if user.DeletionTimestamp.IsZero() &&
		(user.Status.LastLoginTime == nil || user.Status.LastLoginTime.Before(&loginRecord.CreationTimestamp)) {
		user.Status.LastLoginTime = &loginRecord.CreationTimestamp
		user, err = c.ksClient.IamV1alpha2().Users().UpdateStatus(user)
		return err
	}
	return nil
}
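The new controller above leans entirely on the shared controller.BaseController: NewLoginRecordController wires the informer callbacks to Enqueue, assigns reconcile to Handler, and Start simply delegates to the shared Run loop. A minimal wiring sketch for how a caller might construct and start it — the rest config, informer factory setup, and one-week retention value below are illustrative assumptions, not code from this PR:

	// Hypothetical wiring of the new controller; config, resync interval and
	// retention period are example values, not taken from this commit.
	k8sClient := kubernetes.NewForConfigOrDie(config)
	ksClient := kubesphere.NewForConfigOrDie(config)
	ksInformers := externalversions.NewSharedInformerFactory(ksClient, 10*time.Minute)

	ctl := loginrecord.NewLoginRecordController(
		k8sClient, ksClient,
		ksInformers.Iam().V1alpha2().LoginRecords(),
		ksInformers.Iam().V1alpha2().Users(),
		7*24*time.Hour, // keep login records for one week (example value)
	)

	stopCh := make(chan struct{})
	defer close(stopCh)
	ksInformers.Start(stopCh) // start informers after they have been referenced

	if err := ctl.Start(stopCh); err != nil { // blocks until stopCh closes
		klog.Fatal(err)
	}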
265
pkg/controller/loginrecord/loginrecord_controller_test.go
Normal file
@@ -0,0 +1,265 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package loginrecord

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/diff"
	kubeinformers "k8s.io/client-go/informers"
	k8sfake "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"

	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
	ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)

var (
	alwaysReady        = func() bool { return true }
	noResyncPeriodFunc = func() time.Duration { return 0 }
)

type fixture struct {
	t *testing.T

	ksclient  *fake.Clientset
	k8sclient *k8sfake.Clientset
	// Objects to put in the store.
	user        *iamv1alpha2.User
	loginRecord *iamv1alpha2.LoginRecord
	// Actions expected to happen on the client.
	kubeactions []core.Action
	actions     []core.Action
	// Objects from here preloaded into NewSimpleFake.
	kubeobjects []runtime.Object
	objects     []runtime.Object
}

func newFixture(t *testing.T) *fixture {
	f := &fixture{}
	f.t = t
	f.objects = []runtime.Object{}
	f.kubeobjects = []runtime.Object{}
	return f
}

func newUser(name string) *iamv1alpha2.User {
	return &iamv1alpha2.User{
		TypeMeta: metav1.TypeMeta{APIVersion: iamv1alpha2.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: iamv1alpha2.UserSpec{
			Email:       fmt.Sprintf("%s@kubesphere.io", name),
			Lang:        "zh-CN",
			Description: "fake user",
		},
	}
}

func newLoginRecord(username string) *iamv1alpha2.LoginRecord {
	return &iamv1alpha2.LoginRecord{
		TypeMeta: metav1.TypeMeta{APIVersion: iamv1alpha2.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Name:              username,
			CreationTimestamp: metav1.Now(),
			Labels:            map[string]string{iamv1alpha2.UserReferenceLabel: username},
		},
		Spec: iamv1alpha2.LoginRecordSpec{
			Type:    iamv1alpha2.Token,
			Success: true,
			Reason:  "",
		},
	}
}

func (f *fixture) newController() (*loginRecordController, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
	f.ksclient = fake.NewSimpleClientset(f.objects...)
	f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...)

	ksInformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc())
	k8sInformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc())
	if err := ksInformers.Iam().V1alpha2().Users().Informer().GetIndexer().Add(f.user); err != nil {
		f.t.Errorf("add user:%s", err)
	}
	if err := ksInformers.Iam().V1alpha2().LoginRecords().Informer().GetIndexer().Add(f.loginRecord); err != nil {
		f.t.Errorf("add login record:%s", err)
	}

	c := NewLoginRecordController(f.k8sclient, f.ksclient,
		ksInformers.Iam().V1alpha2().LoginRecords(),
		ksInformers.Iam().V1alpha2().Users(),
		time.Minute*5)
	c.userSynced = alwaysReady
	c.loginRecordSynced = alwaysReady
	c.recorder = &record.FakeRecorder{}

	return c, ksInformers, k8sInformers
}

func (f *fixture) run(userName string) {
	f.runController(userName, true, false)
}

func (f *fixture) runExpectError(userName string) {
	f.runController(userName, true, true)
}

func (f *fixture) runController(user string, startInformers bool, expectError bool) {
	c, i, k8sI := f.newController()
	if startInformers {
		stopCh := make(chan struct{})
		defer close(stopCh)
		i.Start(stopCh)
		k8sI.Start(stopCh)
	}

	err := c.reconcile(user)
	if !expectError && err != nil {
		f.t.Errorf("error syncing user: %v", err)
	} else if expectError && err == nil {
		f.t.Error("expected error syncing user, got nil")
	}

	actions := filterInformerActions(f.ksclient.Actions())
	for j, action := range actions {
		if len(f.actions) < j+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[j:])
			break
		}

		expectedAction := f.actions[j]
		checkAction(expectedAction, action, f.t)
	}

	if len(f.actions) > len(actions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
	}

	k8sActions := filterInformerActions(f.k8sclient.Actions())
	for k, action := range k8sActions {
		if len(f.kubeactions) < k+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[k:])
			break
		}

		expectedAction := f.kubeactions[k]
		checkAction(expectedAction, action, f.t)
	}

	if len(f.kubeactions) > len(k8sActions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
	}
}

// checkAction verifies that expected and actual actions are equal and both have
// the same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
	if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
		t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
		return
	}

	if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
		t.Errorf("Action has wrong type. Expected: %T. Got: %T", expected, actual)
		return
	}

	switch a := actual.(type) {
	case core.CreateActionImpl:
		e, _ := expected.(core.CreateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		if !reflect.DeepEqual(expObject, object) {
			t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
		}
	case core.UpdateActionImpl:
		e, _ := expected.(core.UpdateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		expUser := expObject.(*iamv1alpha2.User)
		user := object.(*iamv1alpha2.User)
		expUser.Status.LastTransitionTime = nil
		user.Status.LastTransitionTime = nil
		if !reflect.DeepEqual(expUser, user) {
			t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
		}
	case core.PatchActionImpl:
		e, _ := expected.(core.PatchActionImpl)
		expPatch := e.GetPatch()
		patch := a.GetPatch()
		if !reflect.DeepEqual(expPatch, patch) {
			t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
		}
	default:
		t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
			actual.GetVerb(), actual.GetResource().Resource)
	}
}

// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state we can filter them to lower
// the noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
	var ret []core.Action
	for _, action := range actions {
		if action.Matches("list", "users") ||
			action.Matches("watch", "users") ||
			action.Matches("list", "loginrecords") ||
			action.Matches("watch", "loginrecords") {
			continue
		}
		ret = append(ret, action)
	}

	return ret
}

func (f *fixture) expectUpdateUserStatusAction(user *iamv1alpha2.User) {
	expect := user.DeepCopy()
	action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "users"}, "", expect)
	action.Subresource = "status"
	expect.Status.LastLoginTime = &f.loginRecord.CreationTimestamp
	f.actions = append(f.actions, action)
}

func getKey(user *iamv1alpha2.User, t *testing.T) string {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(user)
	if err != nil {
		t.Errorf("Unexpected error getting key for user %v: %v", user.Name, err)
		return ""
	}
	return key
}

func TestDoNothing(t *testing.T) {
	f := newFixture(t)
	user := newUser("test")
	loginRecord := newLoginRecord("test")

	f.user = user
	f.loginRecord = loginRecord
	f.objects = append(f.objects, user, loginRecord)

	f.expectUpdateUserStatusAction(user)
	f.run(getKey(user, t))
}
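TestDoNothing covers only the fresh-record path: the user status update plus a delayed requeue. A companion test for the expiry branch could look like the following sketch — hypothetical, not part of this commit — which backdates CreationTimestamp past the five-minute retention period the fixture configures and then checks that the fake clientset recorded a delete:

	// Hypothetical test (not in this PR): a record older than the fixture's
	// 5-minute retention period should be deleted by reconcile.
	func TestExpiredRecordIsDeleted(t *testing.T) {
		f := newFixture(t)
		user := newUser("test")
		loginRecord := newLoginRecord("test")
		// Backdate the record beyond the retention period set in newController.
		loginRecord.CreationTimestamp = metav1.NewTime(time.Now().Add(-10 * time.Minute))

		f.user = user
		f.loginRecord = loginRecord
		f.objects = append(f.objects, user, loginRecord)

		c, i, _ := f.newController()
		stopCh := make(chan struct{})
		defer close(stopCh)
		i.Start(stopCh)

		if err := c.reconcile(getKey(user, t)); err != nil {
			t.Errorf("error syncing login record: %v", err)
		}
		// The fake clientset should have recorded a delete for the expired record.
		deleted := false
		for _, action := range f.ksclient.Actions() {
			if action.Matches("delete", "loginrecords") {
				deleted = true
			}
		}
		if !deleted {
			t.Error("expected expired login record to be deleted")
		}
	}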
@@ -1,224 +0,0 @@
/*
 *
 * Copyright 2020 The KubeSphere Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * /
*/

package user

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
	iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
)

type LoginRecordController struct {
	k8sClient           kubernetes.Interface
	ksClient            kubesphere.Interface
	loginRecordInformer iamv1alpha2informers.LoginRecordInformer
	loginRecordLister   iamv1alpha2listers.LoginRecordLister
	loginRecordSynced   cache.InformerSynced
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder                    record.EventRecorder
	loginHistoryRetentionPeriod time.Duration
}

func NewLoginRecordController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
	loginRecordInformer iamv1alpha2informers.LoginRecordInformer,
	loginHistoryRetentionPeriod time.Duration) *LoginRecordController {

	klog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "loginrecord-controller"})
	ctl := &LoginRecordController{
		k8sClient:                   k8sClient,
		ksClient:                    ksClient,
		loginRecordInformer:         loginRecordInformer,
		loginRecordLister:           loginRecordInformer.Lister(),
		loginRecordSynced:           loginRecordInformer.Informer().HasSynced,
		loginHistoryRetentionPeriod: loginHistoryRetentionPeriod,
		workqueue:                   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "loginrecord"),
		recorder:                    recorder,
	}
	return ctl
}

func (c *LoginRecordController) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	// Start the informer factories to begin populating the informer caches
	klog.Info("Starting LoginRecord controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	if ok := cache.WaitForCacheSync(stopCh, c.loginRecordSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Info("Starting workers")
	// Launch workers to process LoginRecord resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	go wait.Until(func() {
		if err := c.sync(); err != nil {
			klog.Errorf("Error periodically syncing user status, %v", err)
		}
	}, time.Hour, stopCh)

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}

func (c *LoginRecordController) enqueueLoginRecord(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *LoginRecordController) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *LoginRecordController) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	// We wrap this block in a func so we can defer c.workqueue.Done.
	err := func(obj interface{}) error {
		// We call Done here so the workqueue knows we have finished
		// processing this item. We also must remember to call Forget if we
		// do not want this work item being re-queued. For example, we do
		// not call Forget if a transient error occurs, instead the item is
		// put back on the workqueue and attempted again after a back-off
		// period.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// We expect strings to come off the workqueue. These are of the
		// form namespace/name. We do this as the delayed nature of the
		// workqueue means the items in the informer cache may actually be
		// more up to date than when the item was initially put onto the
		// workqueue.
		if key, ok = obj.(string); !ok {
			// As the item in the workqueue is actually invalid, we call
			// Forget here else we'd go into a loop of attempting to
			// process a work item that is invalid.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Run the reconcile, passing it the namespace/name string of the
		// LoginRecord resource to be synced.
		if err := c.reconcile(key); err != nil {
			// Put the item back on the workqueue to handle any transient errors.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Finally, if no error occurs we Forget this item so it does not
		// get queued again until another change happens.
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced key:%s", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

func (c *LoginRecordController) reconcile(key string) error {
	loginRecord, err := c.loginRecordLister.Get(key)
	if err != nil {
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("login record '%s' in work queue no longer exists", key))
			return nil
		}
		klog.Error(err)
		return err
	}

	now := time.Now()
	if loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Before(now) { // the login record is beyond the retention period
		if err = c.ksClient.IamV1alpha2().LoginRecords().Delete(loginRecord.Name, metav1.NewDeleteOptions(0)); err != nil {
			klog.Error(err)
			return err
		}
	} else { // put the item back into the queue
		c.workqueue.AddAfter(key, loginRecord.CreationTimestamp.Add(c.loginHistoryRetentionPeriod).Sub(now))
	}
	c.recorder.Event(loginRecord, corev1.EventTypeNormal, successSynced, messageResourceSynced)
	return nil
}

func (c *LoginRecordController) Start(stopCh <-chan struct{}) error {
	return c.Run(4, stopCh)
}

func (c *LoginRecordController) sync() error {
	records, err := c.loginRecordLister.List(labels.Everything())
	if err != nil {
		return err
	}

	for _, record := range records {
		key, err := cache.MetaNamespaceKeyFunc(record)
		if err != nil {
			return err
		}
		c.workqueue.AddRateLimited(key)
	}
	return nil
}
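Everything below the struct in this deleted file is the sample-controller boilerplate (workqueue setup, runWorker, processNextWorkItem) that each controller used to duplicate; the point of this PR is that the loop now lives once in kubesphere.io/kubesphere/pkg/controller/utils/controller. That package is not shown in this diff, but from its call sites (Workqueue, Synced, Name, Handler, Enqueue, Run) its shape is presumably close to the sketch below — an inference, not the actual implementation:

	// Assumed shape of controller.BaseController, inferred from the call sites
	// in this diff; the real implementation may differ.
	package controller

	import (
		"fmt"
		"time"

		utilruntime "k8s.io/apimachinery/pkg/util/runtime"
		"k8s.io/apimachinery/pkg/util/wait"
		"k8s.io/client-go/tools/cache"
		"k8s.io/client-go/util/workqueue"
	)

	type BaseController struct {
		Workqueue workqueue.RateLimitingInterface
		Synced    []cache.InformerSynced
		Name      string
		// Handler is the per-controller reconcile function, keyed by object name.
		Handler func(key string) error
	}

	// Enqueue adds the object's key to the workqueue.
	func (c *BaseController) Enqueue(obj interface{}) {
		key, err := cache.MetaNamespaceKeyFunc(obj)
		if err != nil {
			utilruntime.HandleError(err)
			return
		}
		c.Workqueue.Add(key)
	}

	// Run waits for the caches to sync, then runs workers that feed queued
	// keys to Handler until stopCh is closed.
	func (c *BaseController) Run(threadiness int, stopCh <-chan struct{}) error {
		defer utilruntime.HandleCrash()
		defer c.Workqueue.ShutDown()
		if ok := cache.WaitForCacheSync(stopCh, c.Synced...); !ok {
			return fmt.Errorf("failed to wait for caches to sync")
		}
		for i := 0; i < threadiness; i++ {
			go wait.Until(func() {
				for c.processNextItem() {
				}
			}, time.Second, stopCh)
		}
		<-stopCh
		return nil
	}

	func (c *BaseController) processNextItem() bool {
		obj, shutdown := c.Workqueue.Get()
		if shutdown {
			return false
		}
		defer c.Workqueue.Done(obj)
		key, ok := obj.(string)
		if !ok {
			c.Workqueue.Forget(obj) // invalid item, drop it
			return true
		}
		if err := c.Handler(key); err != nil {
			c.Workqueue.AddRateLimited(key) // retry with rate-limited back-off
			utilruntime.HandleError(err)
			return true
		}
		c.Workqueue.Forget(obj)
		return true
	}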
@@ -19,8 +19,8 @@ package user
import (
	"encoding/json"
	"fmt"
	"kubesphere.io/kubesphere/pkg/controller/utils/controller"
	"reflect"
	"strconv"
	"time"

	"golang.org/x/crypto/bcrypt"
@@ -31,7 +31,6 @@ import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
@@ -62,46 +61,36 @@ const (
	// is synced successfully
	messageResourceSynced = "User synced successfully"
	controllerName        = "user-controller"

	// user finalizer
	finalizer = "finalizers.kubesphere.io/users"
)

type Controller struct {
	k8sClient         kubernetes.Interface
	ksClient          kubesphere.Interface
	kubeconfig        kubeconfig.Interface
	userLister        iamv1alpha2listers.UserLister
	userSynced        cache.InformerSynced
	loginRecordLister iamv1alpha2listers.LoginRecordLister
	loginRecordSynced cache.InformerSynced
	cmSynced          cache.InformerSynced
	fedUserCache      cache.Store
	fedUserController cache.Controller
	ldapClient        ldapclient.Interface
	devopsClient      devops.Interface
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
type userController struct {
	controller.BaseController
	k8sClient             kubernetes.Interface
	ksClient              kubesphere.Interface
	kubeconfig            kubeconfig.Interface
	userLister            iamv1alpha2listers.UserLister
	loginRecordLister     iamv1alpha2listers.LoginRecordLister
	fedUserCache          cache.Store
	ldapClient            ldapclient.Interface
	devopsClient          devops.Interface
	authenticationOptions *authoptions.AuthenticationOptions
	multiClusterEnabled   bool
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}

func NewUserController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,
	config *rest.Config, userInformer iamv1alpha2informers.UserInformer,
	fedUserCache cache.Store, fedUserController cache.Controller,
func NewUserController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface, config *rest.Config,
	userInformer iamv1alpha2informers.UserInformer,
	loginRecordInformer iamv1alpha2informers.LoginRecordInformer,
	fedUserCache cache.Store, fedUserController cache.Controller,
	configMapInformer corev1informers.ConfigMapInformer,
	ldapClient ldapclient.Interface,
	devopsClient devops.Interface,
	authenticationOptions *authoptions.AuthenticationOptions,
	multiClusterEnabled bool) *Controller {
	multiClusterEnabled bool) *userController {

	utilruntime.Must(kubespherescheme.AddToScheme(scheme.Scheme))

@@ -113,152 +102,48 @@ func NewUserController(k8sClient kubernetes.Interface, ksClient kubesphere.Inter
	if config != nil {
		kubeconfigOperator = kubeconfig.NewOperator(k8sClient, configMapInformer, config)
	}
	ctl := &Controller{
	ctl := &userController{
		BaseController: controller.BaseController{
			Workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "User"),
			Synced: []cache.InformerSynced{
				userInformer.Informer().HasSynced,
				configMapInformer.Informer().HasSynced,
				loginRecordInformer.Informer().HasSynced,
			},
			Name: controllerName,
		},
		k8sClient:             k8sClient,
		ksClient:              ksClient,
		kubeconfig:            kubeconfigOperator,
		userLister:            userInformer.Lister(),
		userSynced:            userInformer.Informer().HasSynced,
		loginRecordLister:     loginRecordInformer.Lister(),
		loginRecordSynced:     loginRecordInformer.Informer().HasSynced,
		cmSynced:              configMapInformer.Informer().HasSynced,
		fedUserCache:          fedUserCache,
		fedUserController:     fedUserController,
		ldapClient:            ldapClient,
		devopsClient:          devopsClient,
		workqueue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Users"),
		recorder:              recorder,
		multiClusterEnabled:   multiClusterEnabled,
		authenticationOptions: authenticationOptions,
	}

	if multiClusterEnabled {
		ctl.Synced = append(ctl.Synced, fedUserController.HasSynced)
	}
	ctl.Handler = ctl.reconcile
	klog.Info("Setting up event handlers")
	userInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: ctl.enqueueUser,
		AddFunc: ctl.Enqueue,
		UpdateFunc: func(old, new interface{}) {
			ctl.enqueueUser(new)
		},
		DeleteFunc: ctl.enqueueUser,
	})

	loginRecordInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(new interface{}) {
			if err := ctl.enqueueLogin(new); err != nil {
				klog.Errorf("Failed to enqueue login object, error: %v", err)
			}
			ctl.Enqueue(new)
		},
		DeleteFunc: ctl.Enqueue,
	})
	return ctl
}

func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()

	// Start the informer factories to begin populating the informer caches
	klog.Info("Starting User controller")

	// Wait for the caches to be synced before starting workers
	klog.Info("Waiting for informer caches to sync")

	synced := make([]cache.InformerSynced, 0)
	synced = append(synced, c.userSynced, c.loginRecordSynced, c.cmSynced)
	if c.multiClusterEnabled {
		synced = append(synced, c.fedUserController.HasSynced)
	}

	if ok := cache.WaitForCacheSync(stopCh, synced...); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	klog.Info("Starting workers")
	// Launch workers to process User resources
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
func (c *userController) Start(stopCh <-chan struct{}) error {
	return c.Run(5, stopCh)
}

func (c *Controller) enqueueUser(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()

	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		if err := c.reconcile(key); err != nil {
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		c.workqueue.Forget(obj)
		klog.Infof("Successfully synced key:%s", key)
		return nil
	}(obj)

	if err != nil {
		utilruntime.HandleError(err)
		return true
	}

	return true
}

// enqueueLogin accepts a login record and sets the user's lastLoginTime field
func (c *Controller) enqueueLogin(object interface{}) error {
	login := object.(*iamv1alpha2.LoginRecord)
	username, ok := login.Labels[iamv1alpha2.UserReferenceLabel]

	if !ok || len(username) == 0 {
		return fmt.Errorf("login doesn't belong to any user")
	}

	user, err := c.userLister.Get(username)
	if err != nil {
		if errors.IsNotFound(err) {
			return fmt.Errorf("user %s doesn't exist any more, login record will be deleted later", username)
		}
		return err
	}

	if user.Status.LastLoginTime == nil || user.Status.LastLoginTime.Before(&login.CreationTimestamp) {
		user.Status.LastLoginTime = &login.CreationTimestamp
		user, err = c.ksClient.IamV1alpha2().Users().Update(user)
		return err
	}

	return nil
}

func (c *Controller) reconcile(key string) error {

func (c *userController) reconcile(key string) error {
	// Get the user with this name
	user, err := c.userLister.Get(key)
	if err != nil {
@@ -307,7 +192,7 @@ func (c *Controller) reconcile(key string) error {

		if c.devopsClient != nil {
			// unassign jenkins role; unassigning multiple times is allowed
			if err := c.unassignDevOpsAdminRole(user); err != nil {
			if err = c.unassignDevOpsAdminRole(user); err != nil {
				klog.Error(err)
				return err
			}
@@ -340,7 +225,7 @@ func (c *Controller) reconcile(key string) error {
		}
	}

	if user, err = c.ensurePasswordIsEncrypted(user); err != nil {
	if user, err = c.encryptPassword(user); err != nil {
		klog.Error(err)
		return err
	}
@@ -361,7 +246,7 @@ func (c *Controller) reconcile(key string) error {
	if c.devopsClient != nil {
		// assign jenkins role after user creation; assigning multiple times is
		// allowed because logged-in users can do anything
		if err := c.assignDevOpsAdminRole(user); err != nil {
		if err = c.assignDevOpsAdminRole(user); err != nil {
			klog.Error(err)
			return err
		}
@@ -379,14 +264,9 @@ func (c *Controller) reconcile(key string) error {
	return nil
}

func (c *Controller) Start(stopCh <-chan struct{}) error {
	return c.Run(5, stopCh)
}

func (c *Controller) ensurePasswordIsEncrypted(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
	encrypted := user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] == "true"
	// password is not encrypted
	if !encrypted {
func (c *userController) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
	// password is not empty and not encrypted
	if user.Spec.EncryptedPassword != "" && !isEncrypted(user.Spec.EncryptedPassword) {
		password, err := encrypt(user.Spec.EncryptedPassword)
		if err != nil {
			klog.Error(err)
@@ -395,22 +275,16 @@ func (c *Controller) ensurePasswordIsEncrypted(user *iamv1al
		user = user.DeepCopy()
		user.Spec.EncryptedPassword = password
		if user.Annotations == nil {
			user.Annotations = make(map[string]string, 0)
			user.Annotations = make(map[string]string)
		}
		// ensure plain text password won't be kept anywhere
		delete(user.Annotations, corev1.LastAppliedConfigAnnotation)
		user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] = "true"
		user.Status = iamv1alpha2.UserStatus{
			State:              iamv1alpha2.UserActive,
			LastTransitionTime: &metav1.Time{Time: time.Now()},
		}
		return c.ksClient.IamV1alpha2().Users().Update(user)
	}

	return user, nil
}

func (c *Controller) ensureNotControlledByKubefed(user *iamv1alpha2.User) error {
func (c *userController) ensureNotControlledByKubefed(user *iamv1alpha2.User) error {
	if user.Labels[constants.KubefedManagedLabel] != "false" {
		if user.Labels == nil {
			user.Labels = make(map[string]string, 0)
@@ -425,7 +299,7 @@ func (c *Controller) ensureNotControlledByKubefed(user *iamv1alpha2.User) error
	return nil
}

func (c *Controller) multiClusterSync(user *iamv1alpha2.User) error {
func (c *userController) multiClusterSync(user *iamv1alpha2.User) error {

	if err := c.ensureNotControlledByKubefed(user); err != nil {
		klog.Error(err)
@@ -458,7 +332,7 @@ func (c *Controller) multiClusterSync(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) createFederatedUser(user *iamv1alpha2.User) error {
func (c *userController) createFederatedUser(user *iamv1alpha2.User) error {
	federatedUser := &iamv1alpha2.FederatedUser{
		TypeMeta: metav1.TypeMeta{
			Kind: iamv1alpha2.FedUserKind,
@@ -506,14 +380,13 @@ func (c *Controller) createFederatedUser(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) error {
func (c *userController) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) error {
	data, err := json.Marshal(fedUser)
	if err != nil {
		return err
	}

	cli := c.k8sClient.(*kubernetes.Clientset)

	err = cli.RESTClient().Put().
		AbsPath(fmt.Sprintf("/apis/%s/%s/%s/%s", iamv1alpha2.FedUserResource.Group,
			iamv1alpha2.FedUserResource.Version, iamv1alpha2.FedUserResource.Name, fedUser.Name)).
@@ -529,7 +402,7 @@ func (c *Controller) updateFederatedUser(fedUser *iamv1alpha2.FederatedUser) err
	return nil
}

func (c *Controller) assignDevOpsAdminRole(user *iamv1alpha2.User) error {
func (c *userController) assignDevOpsAdminRole(user *iamv1alpha2.User) error {
	if err := c.devopsClient.AssignGlobalRole(modelsdevops.JenkinsAdminRoleName, user.Name); err != nil {
		klog.Errorf("%+v", err)
		return err
@@ -537,7 +410,7 @@ func (c *Controller) assignDevOpsAdminRole(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) unassignDevOpsAdminRole(user *iamv1alpha2.User) error {
func (c *userController) unassignDevOpsAdminRole(user *iamv1alpha2.User) error {
	if err := c.devopsClient.UnAssignGlobalRole(modelsdevops.JenkinsAdminRoleName, user.Name); err != nil {
		klog.Errorf("%+v", err)
		return err
@@ -545,9 +418,8 @@ func (c *Controller) unassignDevOpsAdminRole(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) ldapSync(user *iamv1alpha2.User) error {
	encrypted, _ := strconv.ParseBool(user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation])
	if encrypted {
func (c *userController) ldapSync(user *iamv1alpha2.User) error {
	if isEncrypted(user.Spec.EncryptedPassword) {
		return nil
	}
	_, err := c.ldapClient.Get(user.Name)
@@ -564,14 +436,12 @@ func (c *Controller) ldapSync(user *iamv1alpha2.User) error {
	}
}

func (c *Controller) deleteGroupBindings(user *iamv1alpha2.User) error {

func (c *userController) deleteGroupBindings(user *iamv1alpha2.User) error {
	// GroupBindings created by KubeSphere will be deleted directly.
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}).String(),
	}
	deleteOptions := metav1.NewDeleteOptions(0)

	if err := c.ksClient.IamV1alpha2().GroupBindings().
		DeleteCollection(deleteOptions, listOptions); err != nil {
		klog.Error(err)
@@ -580,12 +450,11 @@ func (c *Controller) deleteGroupBindings(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) deleteRoleBindings(user *iamv1alpha2.User) error {
func (c *userController) deleteRoleBindings(user *iamv1alpha2.User) error {
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}).String(),
	}
	deleteOptions := metav1.NewDeleteOptions(0)

	if err := c.ksClient.IamV1alpha2().GlobalRoleBindings().
		DeleteCollection(deleteOptions, listOptions); err != nil {
		klog.Error(err)
@@ -619,7 +488,7 @@ func (c *Controller) deleteRoleBindings(user *iamv1alpha2.User) error {
	return nil
}

func (c *Controller) deleteLoginRecords(user *iamv1alpha2.User) error {
func (c *userController) deleteLoginRecords(user *iamv1alpha2.User) error {
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{iamv1alpha2.UserReferenceLabel: user.Name}).String(),
	}
@@ -634,28 +503,60 @@ func (c *Controller) deleteLoginRecords(user *iamv1alpha2.User) error {
}

// syncUserStatus will reconcile user state based on user login records
func (c *Controller) syncUserStatus(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
func (c *userController) syncUserStatus(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
	// disabled user, nothing to do
	if user == nil || (user.Status.State == iamv1alpha2.UserDisabled) {
	if user.Status.State != nil && *user.Status.State == iamv1alpha2.UserDisabled {
		return user, nil
	}

	// a user mapped from another identity provider stays active until disabled
	if user.Spec.EncryptedPassword == "" &&
		user.Labels[iamv1alpha2.IdentifyProviderLabel] != "" &&
		(user.Status.State == nil || *user.Status.State != iamv1alpha2.UserActive) {
		expected := user.DeepCopy()
		active := iamv1alpha2.UserActive
		expected.Status = iamv1alpha2.UserStatus{
			State:              &active,
			LastTransitionTime: &metav1.Time{Time: time.Now()},
		}
		return c.ksClient.IamV1alpha2().Users().UpdateStatus(expected)
	}

	// the user becomes inactive after a blank password is set
	if user.Spec.EncryptedPassword == "" &&
		user.Labels[iamv1alpha2.IdentifyProviderLabel] == "" {
		expected := user.DeepCopy()
		expected.Status = iamv1alpha2.UserStatus{
			State:              nil,
			LastTransitionTime: &metav1.Time{Time: time.Now()},
		}
		return c.ksClient.IamV1alpha2().Users().UpdateStatus(expected)
	}

	// the user becomes active after the password is encrypted
	if isEncrypted(user.Spec.EncryptedPassword) &&
		user.Status.State == nil {
		expected := user.DeepCopy()
		active := iamv1alpha2.UserActive
		expected.Status = iamv1alpha2.UserStatus{
			State:              &active,
			LastTransitionTime: &metav1.Time{Time: time.Now()},
		}
		return c.ksClient.IamV1alpha2().Users().UpdateStatus(expected)
	}

	// blocked user, check whether it needs to be unblocked
	if user.Status.State == iamv1alpha2.UserAuthLimitExceeded {
	if user.Status.State != nil && *user.Status.State == iamv1alpha2.UserAuthLimitExceeded {
		if user.Status.LastTransitionTime != nil &&
			user.Status.LastTransitionTime.Add(c.authenticationOptions.AuthenticateRateLimiterDuration).Before(time.Now()) {
			expected := user.DeepCopy()
			// unblock user
			if user.Annotations[iamv1alpha2.PasswordEncryptedAnnotation] == "true" {
				expected.Status = iamv1alpha2.UserStatus{
					State:              iamv1alpha2.UserActive,
					LastTransitionTime: &metav1.Time{Time: time.Now()},
				}
			}

			if !reflect.DeepEqual(expected.Status, user.Status) {
				return c.ksClient.IamV1alpha2().Users().Update(expected)
			active := iamv1alpha2.UserActive
			expected.Status = iamv1alpha2.UserStatus{
				State:              &active,
				LastTransitionTime: &metav1.Time{Time: time.Now()},
			}
			return c.ksClient.IamV1alpha2().Users().UpdateStatus(expected)
		}
	}
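The recurring change in this hunk is that UserStatus.State goes from a UserState value to a *UserState pointer: nil now means "state not yet determined" (for example, right after a blank password is set), which a plain value cannot express because the zero value is indistinguishable from unset. Hence the active := iamv1alpha2.UserActive; State: &active pattern — Go constants are not addressable. A tiny illustration, assuming the v1alpha2 type names:

	// With a pointer, nil is a distinct "unset" state.
	var state *iamv1alpha2.UserState // nil: not yet determined

	active := iamv1alpha2.UserActive
	state = &active // `&iamv1alpha2.UserActive` would not compile: constants are not addressable

	if state != nil && *state == iamv1alpha2.UserDisabled {
		// acts only on an explicitly disabled user, never on an unset state
	}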

@@ -670,7 +571,8 @@ func (c *Controller) syncUserStatus(user *iamv1alpha2.User,
	now := time.Now()
	failedLoginAttempts := 0
	for _, loginRecord := range records {
		if !loginRecord.Spec.Success && loginRecord.CreationTimestamp.Add(c.authenticationOptions.AuthenticateRateLimiterDuration).After(now) {
		if !loginRecord.Spec.Success &&
			loginRecord.CreationTimestamp.Add(c.authenticationOptions.AuthenticateRateLimiterDuration).After(now) {
			failedLoginAttempts++
		}
	}
@@ -678,26 +580,29 @@ func (c *Controller) syncUserStatus(user *iamv1alpha2.User,
	// block the user if failed login attempts exceed the maximum-tries setting
	if failedLoginAttempts >= c.authenticationOptions.AuthenticateRateLimiterMaxTries {
		expect := user.DeepCopy()
		limitExceed := iamv1alpha2.UserAuthLimitExceeded
		expect.Status = iamv1alpha2.UserStatus{
			State:              iamv1alpha2.UserAuthLimitExceeded,
			State:              &limitExceed,
			Reason:             fmt.Sprintf("Failed login attempts exceed %d in last %s", failedLoginAttempts, c.authenticationOptions.AuthenticateRateLimiterDuration),
			LastTransitionTime: &metav1.Time{Time: time.Now()},
		}

		// block user for AuthenticateRateLimiterDuration duration, after that put it back to the queue to unblock
		c.workqueue.AddAfter(user.Name, c.authenticationOptions.AuthenticateRateLimiterDuration)

		return c.ksClient.IamV1alpha2().Users().Update(expect)
		c.Workqueue.AddAfter(user.Name, c.authenticationOptions.AuthenticateRateLimiterDuration)
		return c.ksClient.IamV1alpha2().Users().UpdateStatus(expect)
	}

	return user, nil
}

func encrypt(password string) (string, error) {
	// when the user is mapped to another identity, the password is empty by
	// default; the user cannot log in directly until the password is reset
	if password == "" {
		return "", nil
	}
	bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(bytes), err
}

// isEncrypted returns whether the given password is encrypted
func isEncrypted(password string) bool {
	// bcrypt.Cost returns the hashing cost used to create the given hashed password
	cost, _ := bcrypt.Cost([]byte(password))
	// cost > 0 means the password has been encrypted
	return cost > 0
}
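With isEncrypted in place, the PasswordEncryptedAnnotation bookkeeping becomes redundant for detection: a bcrypt hash embeds its cost, so bcrypt.Cost succeeds only on a genuine hash. A standalone round-trip sketch (illustrative, not part of the PR):

	package main

	import (
		"fmt"

		"golang.org/x/crypto/bcrypt"
	)

	func main() {
		hash, _ := bcrypt.GenerateFromPassword([]byte("P@88w0rd"), bcrypt.DefaultCost)

		// A real hash reports its embedded cost; plain text yields an error.
		cost, err := bcrypt.Cost(hash)
		fmt.Println(cost, err) // 10 <nil>
		cost, err = bcrypt.Cost([]byte("P@88w0rd"))
		fmt.Println(cost, err) // 0 hashedSecret too short to be a bcrypted password

		// Verification compares the plain text against the stored hash.
		fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("P@88w0rd")) == nil) // true
	}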
@@ -80,32 +80,32 @@ func newUser(name string) *iamv1alpha2.User {
	}
}

func (f *fixture) newController() (*Controller, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
func (f *fixture) newController() (*userController, ksinformers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
	f.ksclient = fake.NewSimpleClientset(f.objects...)
	f.k8sclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
	ldapClient := ldapclient.NewSimpleLdap()

	ksinformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc())
	k8sinformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc())
	ksInformers := ksinformers.NewSharedInformerFactory(f.ksclient, noResyncPeriodFunc())
	k8sInformers := kubeinformers.NewSharedInformerFactory(f.k8sclient, noResyncPeriodFunc())

	for _, user := range f.userLister {
		err := ksinformers.Iam().V1alpha2().Users().Informer().GetIndexer().Add(user)
		err := ksInformers.Iam().V1alpha2().Users().Informer().GetIndexer().Add(user)
		if err != nil {
			f.t.Errorf("add user:%s", err)
		}
	}

	c := NewUserController(f.k8sclient, f.ksclient, nil,
		ksinformers.Iam().V1alpha2().Users(),
		ksInformers.Iam().V1alpha2().Users(),
		ksInformers.Iam().V1alpha2().LoginRecords(),
		nil, nil,
		ksinformers.Iam().V1alpha2().LoginRecords(),
		k8sinformers.Core().V1().ConfigMaps(),
		k8sInformers.Core().V1().ConfigMaps(),
		ldapClient, nil,
		options.NewAuthenticateOptions(), false)
	c.userSynced = alwaysReady
	c.Synced = []cache.InformerSynced{alwaysReady}
	c.recorder = &record.FakeRecorder{}

	return c, ksinformers, k8sinformers
	return c, ksInformers, k8sInformers
}

func (f *fixture) run(userName string) {
@@ -230,14 +230,14 @@ func filterInformerActions(actions []core.Action) []core.Action {

func (f *fixture) expectUpdateUserStatusAction(user *iamv1alpha2.User) {
	expect := user.DeepCopy()
	//expect.Status.State = iamv1alpha2.UserActive
	expect.Finalizers = []string{"finalizers.kubesphere.io/users"}
	action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "users"}, "", expect)
	f.actions = append(f.actions, action)

	expect = expect.DeepCopy()
	expect.Status.State = iamv1alpha2.UserActive
	expect.Annotations = map[string]string{iamv1alpha2.PasswordEncryptedAnnotation: "true"}
	action = core.NewUpdateAction(schema.GroupVersionResource{Resource: "users"}, "", expect)
	action.Subresource = "status"
	f.actions = append(f.actions, action)
}

@@ -39,10 +39,7 @@ func (a *EmailValidator) Handle(ctx context.Context, req admission.Request) admi
	}

	allUsers := v1alpha2.UserList{}

	err = a.Client.List(ctx, &allUsers, &client.ListOptions{})

	if err != nil {
	if err = a.Client.List(ctx, &allUsers, &client.ListOptions{}); err != nil {
		return admission.Errored(http.StatusInternalServerError, err)
	}

@@ -51,7 +48,6 @@ func (a *EmailValidator) Handle(ctx context.Context, req admission.Request) admi
	}

	alreadyExist := emailAlreadyExist(allUsers, user)

	if alreadyExist {
		return admission.Errored(http.StatusConflict, fmt.Errorf("user email: %s already exists", user.Spec.Email))
	}