fix controller Start method signature

Signed-off-by: Roland.Ma <rolandma@yunify.com>
This commit is contained in:
Roland.Ma
2021-08-11 09:05:53 +00:00
parent 810bfb618a
commit 2fcfb81066
39 changed files with 106 additions and 120 deletions

View File

@@ -17,9 +17,9 @@ limitations under the License.
package app
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/kubefed/pkg/controller/util"
@@ -125,9 +125,9 @@ func addControllers(
return err
}
fedUserCache, fedUserCacheController = util.NewResourceInformer(fedUserClient, "", &iamv1alpha2.FedUserResource, func(object runtime.Object) {})
fedGlobalRoleCache, fedGlobalRoleCacheController = util.NewResourceInformer(fedGlobalRoleClient, "", &iamv1alpha2.FedGlobalRoleResource, func(object runtime.Object) {})
fedGlobalRoleBindingCache, fedGlobalRoleBindingCacheController = util.NewResourceInformer(fedGlobalRoleBindingClient, "", &iamv1alpha2.FedGlobalRoleBindingResource, func(object runtime.Object) {})
fedUserCache, fedUserCacheController = util.NewResourceInformer(fedUserClient, "", &iamv1alpha2.FedUserResource, func(object runtimeclient.Object) {})
fedGlobalRoleCache, fedGlobalRoleCacheController = util.NewResourceInformer(fedGlobalRoleClient, "", &iamv1alpha2.FedGlobalRoleResource, func(object runtimeclient.Object) {})
fedGlobalRoleBindingCache, fedGlobalRoleBindingCacheController = util.NewResourceInformer(fedGlobalRoleBindingClient, "", &iamv1alpha2.FedGlobalRoleBindingResource, func(object runtimeclient.Object) {})
go fedUserCacheController.Run(stopCh)
go fedGlobalRoleCacheController.Run(stopCh)

View File

@@ -25,7 +25,6 @@ import (
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/auditregistration/v1alpha1"
v1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/apis/audit"
@@ -53,7 +52,7 @@ func TestGetAuditLevel(t *testing.T) {
Name: "kube-auditing-webhook",
},
Spec: auditingv1alpha1.WebhookSpec{
AuditLevel: v1alpha1.LevelRequestResponse,
AuditLevel: auditingv1alpha1.LevelRequestResponse,
},
}
@@ -82,7 +81,7 @@ func TestAuditing_Enabled(t *testing.T) {
Name: "kube-auditing-webhook",
},
Spec: auditingv1alpha1.WebhookSpec{
AuditLevel: v1alpha1.LevelNone,
AuditLevel: auditingv1alpha1.LevelNone,
},
}
@@ -111,7 +110,7 @@ func TestAuditing_K8sAuditingEnabled(t *testing.T) {
Name: "kube-auditing-webhook",
},
Spec: auditingv1alpha1.WebhookSpec{
AuditLevel: v1alpha1.LevelNone,
AuditLevel: auditingv1alpha1.LevelNone,
K8sAuditingEnabled: true,
},
}
@@ -141,7 +140,7 @@ func TestAuditing_LogRequestObject(t *testing.T) {
Name: "kube-auditing-webhook",
},
Spec: auditingv1alpha1.WebhookSpec{
AuditLevel: v1alpha1.LevelRequestResponse,
AuditLevel: auditingv1alpha1.LevelRequestResponse,
K8sAuditingEnabled: true,
},
}
@@ -232,7 +231,7 @@ func TestAuditing_LogResponseObject(t *testing.T) {
Name: "kube-auditing-webhook",
},
Spec: auditingv1alpha1.WebhookSpec{
AuditLevel: v1alpha1.LevelMetadata,
AuditLevel: auditingv1alpha1.LevelMetadata,
K8sAuditingEnabled: true,
},
}

View File

@@ -57,7 +57,7 @@ type ApplicationReconciler struct {
ApplicationSelector labels.Selector //
}
func (r *ApplicationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *ApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var app appv1beta1.Application
err := r.Get(context.Background(), req.NamespacedName, &app)
if err != nil {
@@ -258,7 +258,7 @@ func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
return err
}
sources := []runtime.Object{
sources := []client.Object{
&v1.Deployment{},
&corev1.Service{},
&v1.StatefulSet{},
@@ -271,21 +271,21 @@ func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Watch for changes to Application
err = c.Watch(
&source.Kind{Type: s},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(
func(h handler.MapObject) []reconcile.Request {
handler.EnqueueRequestsFromMapFunc(
func(h client.Object) []reconcile.Request {
return []reconcile.Request{{NamespacedName: types.NamespacedName{
Name: servicemesh.GetApplictionName(h.Meta.GetLabels()),
Namespace: h.Meta.GetNamespace()}}}
})},
Name: servicemesh.GetApplictionName(h.GetLabels()),
Namespace: h.GetNamespace()}}}
}),
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return isApp(e.MetaOld, e.MetaNew)
return isApp(e.ObjectOld, e.ObjectOld)
},
CreateFunc: func(e event.CreateEvent) bool {
return isApp(e.Meta)
return isApp(e.Object)
},
DeleteFunc: func(e event.DeleteEvent) bool {
return isApp(e.Meta)
return isApp(e.Object)
},
})
if err != nil {

View File

@@ -107,7 +107,7 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
err := k8sClient.Create(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to create a test namespace")
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
mgr, err := ctrl.NewManager(cfg, ctrl.Options{MetricsBindAddress: "0"})
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
selector, _ := labels.Parse("app.kubernetes.io/name,!kubesphere.io/creator")
@@ -122,7 +122,7 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
Expect(err).NotTo(HaveOccurred(), "failed to setup application reconciler")
go func() {
err = mgr.Start(stopCh)
err = mgr.Start(context.Background())
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
}()
})

View File

@@ -240,8 +240,8 @@ func (c *Controller) reconcile(key string) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(4, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) Approve(csr *certificatesv1beta1.CertificateSigningRequest) error {

View File

@@ -197,8 +197,8 @@ func NewClusterController(
return c
}
func (c *clusterController) Start(stopCh <-chan struct{}) error {
return c.Run(3, stopCh)
func (c *clusterController) Start(ctx context.Context) error {
return c.Run(3, ctx.Done())
}
func (c *clusterController) Run(workers int, stopCh <-chan struct{}) error {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package clusterrolebinding
import (
"context"
"fmt"
"time"
@@ -232,6 +233,6 @@ func (c *Controller) reconcile(key string) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(4, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}

View File

@@ -160,8 +160,8 @@ func NewDestinationRuleController(deploymentInformer informersv1.DeploymentInfor
}
func (v *DestinationRuleController) Start(stopCh <-chan struct{}) error {
return v.Run(5, stopCh)
func (v *DestinationRuleController) Start(ctx context.Context) error {
return v.Run(5, ctx.Done())
}
func (v *DestinationRuleController) Run(workers int, stopCh <-chan struct{}) error {

View File

@@ -227,8 +227,8 @@ func (c *Controller) reconcile(key string) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(4, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(ctx context.Context, globalRole *iamv1alpha2.GlobalRole) error {

View File

@@ -249,8 +249,8 @@ func (c *Controller) reconcile(key string) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(4, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(globalRoleBinding *iamv1alpha2.GlobalRoleBinding) error {

View File

@@ -113,8 +113,8 @@ func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface
return ctl
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(1, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(1, ctx.Done())
}
// reconcile handles Group informer events, cleaning up related resources when a group is being deleted.

View File

@@ -180,8 +180,8 @@ func (c *Controller) reconcile(key string) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(2, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(2, ctx.Done())
}
func (c *Controller) unbindUser(groupBinding *iamv1alpha2.GroupBinding) error {

View File

@@ -87,8 +87,8 @@ func NewJobController(jobInformer batchv1informers.JobInformer, client clientset
}
func (v *JobController) Start(stopCh <-chan struct{}) error {
return v.Run(5, stopCh)
func (v *JobController) Start(ctx context.Context) error {
return v.Run(5, ctx.Done())
}
func (v *JobController) Run(workers int, stopCh <-chan struct{}) error {

View File

@@ -104,8 +104,8 @@ func NewLoginRecordController(k8sClient kubernetes.Interface,
return ctl
}
func (c *loginRecordController) Start(stopCh <-chan struct{}) error {
return c.Run(5, stopCh)
func (c *loginRecordController) Start(ctx context.Context) error {
return c.Run(5, ctx.Done())
}
func (c *loginRecordController) reconcile(key string) error {

View File

@@ -86,7 +86,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=rolebases,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("namespace", req.NamespacedName)
rootCtx := context.Background()
namespace := &corev1.Namespace{}

View File

@@ -332,9 +332,9 @@ func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
return nil, c.updateIPPoolStatus(pool)
}
func (c *IPPoolController) Start(stopCh <-chan struct{}) error {
go c.provider.SyncStatus(stopCh, c.ippoolQueue)
return c.Run(5, stopCh)
func (c *IPPoolController) Start(ctx context.Context) error {
go c.provider.SyncStatus(ctx.Done(), c.ippoolQueue)
return c.Run(5, ctx.Done())
}
func (c *IPPoolController) Run(workers int, stopCh <-chan struct{}) error {

View File

@@ -77,7 +77,7 @@ var _ = Describe("test ippool", func() {
stopCh := make(chan struct{})
go ksInformer.Start(stopCh)
go k8sInformer.Start(stopCh)
go c.Start(stopCh)
go c.Start(context.Background())
It("test create ippool", func() {
clone := pool.DeepCopy()

View File

@@ -692,8 +692,8 @@ func NewNSNetworkPolicyController(
return controller
}
func (c *NSNetworkPolicyController) Start(stopCh <-chan struct{}) error {
return c.Run(defaultThread, defaultSync, stopCh)
func (c *NSNetworkPolicyController) Start(ctx context.Context) error {
return c.Run(defaultThread, defaultSync, ctx.Done())
}
// Run starts the controller.

View File

@@ -17,6 +17,7 @@ limitations under the License.
package nsnetworkpolicy
import (
"context"
"fmt"
"reflect"
"strings"
@@ -157,7 +158,7 @@ var _ = Describe("Nsnetworkpolicy", func() {
c.workspaceInformerSynced = alwaysReady
c.informerSynced = alwaysReady
go c.Start(stopCh)
go c.Start(context.Background())
})
It("test func namespaceNetworkIsolateEnabled", func() {

View File

@@ -21,7 +21,7 @@ import (
"net/http"
"sync"
"k8s.io/api/admission/v1beta1"
v1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -98,7 +98,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
// Get the object in the request
obj := validator.Obj.DeepCopyObject()
if req.Operation == v1beta1.Create {
if req.Operation == v1.Create {
err := h.decoder.Decode(req, obj)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
@@ -110,7 +110,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
}
}
if req.Operation == v1beta1.Update {
if req.Operation == v1.Update {
oldObj := obj.DeepCopyObject()
err := h.decoder.DecodeRaw(req.Object, obj)
@@ -128,7 +128,7 @@ func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) a
}
}
if req.Operation == v1beta1.Delete {
if req.Operation == v1.Delete {
// In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346
// OldObject contains the object being deleted
err := h.decoder.DecodeRaw(req.OldObject, obj)

View File

@@ -25,7 +25,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -58,7 +57,7 @@ const (
type Controller struct {
client.Client
ksCache cache.Cache
reconciledObjs []runtime.Object
reconciledObjs []client.Object
informerSynced []toolscache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
@@ -217,34 +216,27 @@ func (c *Controller) processNextWorkItem() bool {
// with the current status of the resource.
func (c *Controller) reconcile(obj interface{}) error {
runtimeObj, ok := obj.(runtime.Object)
runtimeObj, ok := obj.(client.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("object does not implement the Object interfaces"))
return nil
}
runtimeObj = runtimeObj.DeepCopyObject()
accessor, err := meta.Accessor(runtimeObj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("object does not implement the Object interfaces"))
return nil
}
// Only reconcile the secret which created by notification manager.
if secret, ok := obj.(*corev1.Secret); ok {
if secret.Namespace != constants.NotificationSecretNamespace ||
secret.Labels == nil || secret.Labels[constants.NotificationManagedLabel] != "true" {
klog.V(8).Infof("No need to reconcile secret %s/%s", accessor.GetNamespace(), accessor.GetName())
klog.V(8).Infof("No need to reconcile secret %s/%s", runtimeObj.GetNamespace(), runtimeObj.GetName())
return nil
}
}
name := accessor.GetName()
name := runtimeObj.GetName()
// The notification controller should update the annotations of secrets managed by itself
// whenever a cluster is added or deleted. This way, the controller will have a chance to override the secret.
if _, ok := obj.(*v1alpha1.Cluster); ok {
err = c.updateSecret()
err := c.updateSecret()
if err != nil {
klog.Errorf("update secret failed, %s", err)
return err
@@ -253,7 +245,7 @@ func (c *Controller) reconcile(obj interface{}) error {
return nil
}
err = c.Get(context.Background(), client.ObjectKey{Name: accessor.GetName(), Namespace: accessor.GetNamespace()}, runtimeObj)
err := c.Get(context.Background(), client.ObjectKey{Name: runtimeObj.GetName(), Namespace: runtimeObj.GetNamespace()}, runtimeObj)
if err != nil {
// The user may no longer exist, in which case we stop
// processing.
@@ -276,11 +268,11 @@ func (c *Controller) reconcile(obj interface{}) error {
return nil
}
func (c *Controller) Start(stopCh <-chan struct{}) error {
return c.Run(4, stopCh)
func (c *Controller) Start(ctx context.Context) error {
return c.Run(4, ctx.Done())
}
func (c *Controller) multiClusterSync(ctx context.Context, obj runtime.Object) error {
func (c *Controller) multiClusterSync(ctx context.Context, obj client.Object) error {
if err := c.ensureNotControlledByKubefed(ctx, obj); err != nil {
klog.Error(err)
@@ -546,23 +538,17 @@ func (c *Controller) updateSecret() error {
return nil
}
func (c *Controller) ensureNotControlledByKubefed(ctx context.Context, obj runtime.Object) error {
func (c *Controller) ensureNotControlledByKubefed(ctx context.Context, obj client.Object) error {
accessor, err := meta.Accessor(obj)
if err != nil {
klog.Error(err)
return nil
}
labels := accessor.GetLabels()
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string, 0)
}
if labels[constants.KubefedManagedLabel] != "false" {
labels[constants.KubefedManagedLabel] = "false"
accessor.SetLabels(labels)
err := c.Update(ctx, accessor.(runtime.Object))
obj.SetLabels(labels)
err := c.Update(ctx, obj)
if err != nil {
klog.Error(err)
}

View File

@@ -25,7 +25,6 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
fakek8s "k8s.io/client-go/kubernetes/fake"
@@ -228,19 +227,19 @@ func (f *fakeCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVers
}
// GetInformer returns the informer for the obj
func (f *fakeCache) GetInformer(ctx context.Context, obj runtime.Object) (cache.Informer, error) {
func (f *fakeCache) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) {
fakeInformerFactory := k8sinformers.NewSharedInformerFactory(f.K8sClient, defaultResync)
return fakeInformerFactory.Core().V1().Namespaces().Informer(), nil
}
func (f *fakeCache) IndexField(ctx context.Context, obj runtime.Object, field string, extractValue client.IndexerFunc) error {
func (f *fakeCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
return nil
}
func (f *fakeCache) Start(stopCh <-chan struct{}) error {
func (f *fakeCache) Start(ctx context.Context) error {
return nil
}
func (f *fakeCache) WaitForCacheSync(stop <-chan struct{}) bool {
func (f *fakeCache) WaitForCacheSync(ctx context.Context) bool {
return true
}

View File

@@ -54,7 +54,7 @@ const (
appFinalizer = "helmapplication.application.kubesphere.io"
)
func (r *ReconcileHelmApplication) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (r *ReconcileHelmApplication) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
klog.V(4).Infof("sync helm application: %s ", request.String())
rootCtx := context.Background()

View File

@@ -49,7 +49,7 @@ type ReconcileHelmApplicationVersion struct {
// Reconcile reads that state of the cluster for a helmapplicationversions object and makes changes based on the state read
// and what is in the helmapplicationversions.Spec
func (r *ReconcileHelmApplicationVersion) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (r *ReconcileHelmApplicationVersion) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
klog.V(4).Infof("sync helm application version: %s", request.String())
defer func() {

View File

@@ -194,7 +194,7 @@ type ReconcileHelmCategory struct {
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories/status,verbs=get;update;patch
func (r *ReconcileHelmCategory) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (r *ReconcileHelmCategory) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
klog.V(4).Infof("sync helm category: %s", request.String())
defer func() {

View File

@@ -97,7 +97,7 @@ type ReconcileHelmRelease struct {
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (r *ReconcileHelmRelease) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
// Fetch the helmReleases instance
instance := &v1alpha1.HelmRelease{}
err := r.Get(context.TODO(), request.NamespacedName, instance)

View File

@@ -103,7 +103,7 @@ type ReconcileHelmRepo struct {
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmrepos,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmrepos/status,verbs=get;update;patch
func (r *ReconcileHelmRepo) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (r *ReconcileHelmRepo) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
klog.Infof("sync repo: %s", request.Name)
defer func() {

View File

@@ -112,7 +112,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles
return err
}
resources := []runtime.Object{
resources := []client.Object{
&corev1.Pod{},
&corev1.Service{},
&corev1.PersistentVolumeClaim{},
@@ -121,7 +121,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles
for _, resource := range resources {
err := c.Watch(
&source.Kind{Type: resource},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.mapper)},
handler.EnqueueRequestsFromMapFunc(r.mapper),
predicate.Funcs{
GenericFunc: func(e event.GenericEvent) bool {
return false
@@ -132,7 +132,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles
UpdateFunc: func(e event.UpdateEvent) bool {
notifyChange := false
// we only want to queue the updates we care about though as too much noise will overwhelm queue.
switch e.MetaOld.(type) {
switch e.ObjectOld.(type) {
case *corev1.Pod:
oldPod := e.ObjectOld.(*corev1.Pod)
newPod := e.ObjectNew.(*corev1.Pod)
@@ -157,14 +157,14 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles
return nil
}
func (r *Reconciler) mapper(h handler.MapObject) []reconcile.Request {
func (r *Reconciler) mapper(h client.Object) []reconcile.Request {
// check if the quota controller can evaluate this kind, if not, ignore it altogether...
var result []reconcile.Request
evaluators := r.registry.List()
ctx := context.TODO()
resourceQuotaNames, err := resourceQuotaNamesFor(ctx, r.Client, h.Meta.GetNamespace())
resourceQuotaNames, err := resourceQuotaNamesFor(ctx, r.Client, h.GetNamespace())
if err != nil {
klog.Errorf("failed to get resource quota names for: %v %T %v, err: %v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), err)
klog.Errorf("failed to get resource quota names for: %v %T %v, err: %v", h.GetNamespace(), h, h.GetName(), err)
return result
}
// only queue those quotas that are tracking a resource associated with this kind.
@@ -183,11 +183,11 @@ func (r *Reconciler) mapper(h handler.MapObject) []reconcile.Request {
}
}
}
klog.V(6).Infof("resource quota reconcile after resource change: %v %T %v, %+v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), result)
klog.V(6).Infof("resource quota reconcile after resource change: %v %T %v, %+v", h.GetNamespace(), h, h.GetName(), result)
return result
}
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.logger.WithValues("resourcequota", req.NamespacedName)
rootCtx := context.TODO()
resourceQuota := &quotav1alpha2.ResourceQuota{}

View File

@@ -70,9 +70,9 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.logger.WithValues("serivceaccount", req.NamespacedName)
ctx := context.Background()
// ctx := context.Background()
sa := &corev1.ServiceAccount{}
if err := r.Get(ctx, req.NamespacedName, sa); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)

View File

@@ -94,7 +94,7 @@ var _ = Describe("ServiceAccount", func() {
Expect(reconciler.Create(ctx, sa)).Should(Succeed())
Expect(reconciler.Create(ctx, role)).Should(Succeed())
_, err := reconciler.Reconcile(req)
_, err := reconciler.Reconcile(ctx, req)
Expect(err).To(BeNil())
By("Expecting to bind role successfully")
@@ -116,7 +116,7 @@ var _ = Describe("ServiceAccount", func() {
}
Expect(reconciler.Create(ctx, sa)).Should(Succeed())
_, err := reconciler.Reconcile(req)
_, err := reconciler.Reconcile(ctx, req)
Expect(apierrors.IsNotFound(err)).To(BeTrue())
})
})

View File

@@ -149,8 +149,8 @@ func NewController(
return controller
}
func (c *StorageCapabilityController) Start(stopCh <-chan struct{}) error {
return c.Run(5, stopCh)
func (c *StorageCapabilityController) Start(ctx context.Context) error {
return c.Run(5, ctx.Done())
}
func (c *StorageCapabilityController) Run(threadCnt int, stopCh <-chan struct{}) error {

View File

@@ -150,8 +150,8 @@ func NewUserController(k8sClient kubernetes.Interface, ksClient kubesphere.Inter
return ctl
}
func (c *userController) Start(stopCh <-chan struct{}) error {
return c.Run(5, stopCh)
func (c *userController) Start(ctx context.Context) error {
return c.Run(5, ctx.Done())
}
func (c *userController) reconcile(key string) error {

View File

@@ -155,8 +155,8 @@ func NewVirtualServiceController(serviceInformer coreinformers.ServiceInformer,
}
func (v *VirtualServiceController) Start(stopCh <-chan struct{}) error {
return v.Run(5, stopCh)
func (v *VirtualServiceController) Start(ctx context.Context) error {
return v.Run(5, ctx.Done())
}
func (v *VirtualServiceController) Run(workers int, stopCh <-chan struct{}) error {

View File

@@ -77,7 +77,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=rolebases,verbs=get;list;watch
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspace", req.NamespacedName)
rootCtx := context.Background()
workspace := &tenantv1alpha1.Workspace{}

View File

@@ -84,7 +84,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspaceroles,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspacerole", req.NamespacedName)
rootCtx := context.Background()
workspaceRole := &iamv1alpha2.WorkspaceRole{}

View File

@@ -84,7 +84,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspacerolebinding", req.NamespacedName)
rootCtx := context.Background()
workspaceRoleBinding := &iamv1alpha2.WorkspaceRoleBinding{}

View File

@@ -89,7 +89,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=iam.kubesphere.io,resources=workspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=types.kubefed.io,resources=federatedworkspacerolebindings,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspacetemplate", req.NamespacedName)
rootCtx := context.Background()
workspaceTemplate := &tenantv1alpha2.WorkspaceTemplate{}

View File

@@ -86,7 +86,7 @@ var _ = Describe("WorkspaceTemplate", func() {
req := ctrl.Request{
NamespacedName: key,
}
_, err := reconciler.Reconcile(req)
_, err := reconciler.Reconcile(context.Background(), req)
Expect(err).To(BeNil())
By("Expecting to create workspace template successfully")
@@ -117,7 +117,7 @@ var _ = Describe("WorkspaceTemplate", func() {
updated.Spec.Template.Spec.Manager = "admin"
Expect(reconciler.Update(context.Background(), updated)).Should(Succeed())
_, err = reconciler.Reconcile(req)
_, err = reconciler.Reconcile(context.Background(), req)
Expect(err).To(BeNil())
// List workspace role bindings
@@ -138,7 +138,7 @@ var _ = Describe("WorkspaceTemplate", func() {
return reconciler.Update(context.Background(), f)
}, timeout, interval).Should(Succeed())
_, err = reconciler.Reconcile(req)
_, err = reconciler.Reconcile(context.Background(), req)
Expect(err).To(BeNil())
})
}

View File

@@ -69,11 +69,11 @@ func TestGetListApplications(t *testing.T) {
t.Fatalf("unable add APIs to scheme: %v", err)
}
stopCh := make(chan struct{})
ctx := context.Background()
ce, _ := cache.New(cfg, cache.Options{Scheme: sch})
go ce.Start(stopCh)
ce.WaitForCacheSync(stopCh)
go ce.Start(ctx)
ce.WaitForCacheSync(ctx)
c, _ = client.New(cfg, client.Options{Scheme: sch})
@@ -98,7 +98,7 @@ func TestGetListApplications(t *testing.T) {
},
}
ctx := context.TODO()
// ctx := context.TODO()
createNamespace(ns, ctx)
for _, app := range testCases {