refactor application controller
Signed-off-by: zackzhang <zackzhang@yunify.com>
@@ -18,262 +18,124 @@ package application
 import (
 	"context"
 	"fmt"
-	"time"
 
-	v1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	v1beta12 "k8s.io/api/networking/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	informersv1 "k8s.io/client-go/informers/apps/v1"
-	coreinformers "k8s.io/client-go/informers/core/v1"
-	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
-	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	listersv1 "k8s.io/client-go/listers/apps/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	"k8s.io/client-go/tools/cache"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
-	"k8s.io/client-go/util/workqueue"
-	log "k8s.io/klog"
-	servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2"
-	servicemeshlisters "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2"
+	"k8s.io/klog"
+	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
 	"kubesphere.io/kubesphere/pkg/controller/virtualservice/util"
-	applicationclient "kubesphere.io/kubesphere/pkg/simple/client/app/clientset/versioned"
-	applicationinformers "kubesphere.io/kubesphere/pkg/simple/client/app/informers/externalversions/app/v1beta1"
-	applicationlister "kubesphere.io/kubesphere/pkg/simple/client/app/listers/app/v1beta1"
+	"sigs.k8s.io/application/api/v1beta1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+	"time"
 )
 
-const (
-	// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
-	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
-	// sequence of delays between successive queuings of a service.
-	//
-	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
-	maxRetries = 15
-)
-
-type ApplicationController struct {
-	client clientset.Interface
-
-	applicationClient applicationclient.Interface
-
-	eventBroadcaster record.EventBroadcaster
-	eventRecorder    record.EventRecorder
-
-	applicationLister applicationlister.ApplicationLister
-	applicationSynced cache.InformerSynced
-
-	serviceLister corelisters.ServiceLister
-	serviceSynced cache.InformerSynced
-
-	deploymentLister listersv1.DeploymentLister
-	deploymentSynced cache.InformerSynced
-
-	statefulSetLister listersv1.StatefulSetLister
-	statefulSetSynced cache.InformerSynced
-
-	strategyLister servicemeshlisters.StrategyLister
-	strategySynced cache.InformerSynced
-
-	servicePolicyLister servicemeshlisters.ServicePolicyLister
-	servicePolicySynced cache.InformerSynced
-
-	queue workqueue.RateLimitingInterface
-
-	workerLoopPeriod time.Duration
-}
+// Add creates a new Workspace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	return add(mgr, newReconciler(mgr))
+}
 
-func NewApplicationController(serviceInformer coreinformers.ServiceInformer,
-	deploymentInformer informersv1.DeploymentInformer,
-	statefulSetInformer informersv1.StatefulSetInformer,
-	strategyInformer servicemeshinformers.StrategyInformer,
-	servicePolicyInformer servicemeshinformers.ServicePolicyInformer,
-	applicationInformer applicationinformers.ApplicationInformer,
-	client clientset.Interface,
-	applicationClient applicationclient.Interface) *ApplicationController {
-
-	broadcaster := record.NewBroadcaster()
-	broadcaster.StartLogging(func(format string, args ...interface{}) {
-		log.Info(fmt.Sprintf(format, args))
-	})
-	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
-	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "application-controller"})
-
-	v := &ApplicationController{
-		client:            client,
-		applicationClient: applicationClient,
-		queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "application"),
-		workerLoopPeriod:  time.Second,
-	}
-
-	v.deploymentLister = deploymentInformer.Lister()
-	v.deploymentSynced = deploymentInformer.Informer().HasSynced
-
-	deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    v.enqueueObject,
-		UpdateFunc: func(old, new interface{}) { v.enqueueObject(new) },
-		DeleteFunc: v.enqueueObject,
-	})
-
-	v.statefulSetLister = statefulSetInformer.Lister()
-	v.statefulSetSynced = statefulSetInformer.Informer().HasSynced
-
-	statefulSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    v.enqueueObject,
-		UpdateFunc: func(old, new interface{}) { v.enqueueObject(new) },
-		DeleteFunc: v.enqueueObject,
-	})
-
-	v.serviceLister = serviceInformer.Lister()
-	v.serviceSynced = serviceInformer.Informer().HasSynced
-
-	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    v.enqueueObject,
-		UpdateFunc: func(old, new interface{}) { v.enqueueObject(new) },
-		DeleteFunc: v.enqueueObject,
-	})
-
-	v.strategyLister = strategyInformer.Lister()
-	v.strategySynced = strategyInformer.Informer().HasSynced
-
-	strategyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    v.enqueueObject,
-		UpdateFunc: func(old, new interface{}) { v.enqueueObject(new) },
-		DeleteFunc: v.enqueueObject,
-	})
-
-	v.servicePolicyLister = servicePolicyInformer.Lister()
-	v.servicePolicySynced = servicePolicyInformer.Informer().HasSynced
-
-	servicePolicyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    v.enqueueObject,
-		UpdateFunc: func(old, new interface{}) { v.enqueueObject(new) },
-		DeleteFunc: v.enqueueObject,
-	})
-
-	v.applicationLister = applicationInformer.Lister()
-	v.applicationSynced = applicationInformer.Informer().HasSynced
-
-	v.eventBroadcaster = broadcaster
-	v.eventRecorder = recorder
-
-	return v
-}
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+	return &ReconcileApplication{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
+		recorder: mgr.GetEventRecorderFor("application-controller")}
+}
 
-func (v *ApplicationController) Start(stopCh <-chan struct{}) error {
-	return v.Run(2, stopCh)
-}
-
-func (v *ApplicationController) Run(workers int, stopCh <-chan struct{}) error {
-	defer utilruntime.HandleCrash()
-	defer v.queue.ShutDown()
-
-	log.Info("starting application controller")
-	defer log.Info("shutting down application controller")
-
-	if !cache.WaitForCacheSync(stopCh, v.deploymentSynced, v.statefulSetSynced, v.serviceSynced, v.strategySynced, v.servicePolicySynced, v.applicationSynced) {
-		return fmt.Errorf("failed to wait for caches to sync")
-	}
-
-	for i := 0; i < workers; i++ {
-		go wait.Until(v.worker, v.workerLoopPeriod, stopCh)
-	}
-	<-stopCh
-	return nil
-}
-
-func (v *ApplicationController) worker() {
-	for v.processNextWorkItem() {
-	}
-}
-
-func (v *ApplicationController) processNextWorkItem() bool {
-	eKey, quit := v.queue.Get()
-	if quit {
-		return false
-	}
-
-	defer v.queue.Done(eKey)
-
-	err := v.syncApplication(eKey.(string))
-	v.handleErr(err, eKey)
-
-	return true
-}
-
-func (v *ApplicationController) syncApplication(key string) error {
-	startTime := time.Now()
-	namespace, name, err := cache.SplitMetaNamespaceKey(key)
-	if err != nil {
-		log.Error(err, "not a valid controller key", "key", key)
-		return err
-	}
-
-	defer func() {
-		log.V(4).Info("Finished updating application.", "namespace", namespace, "name", name, "duration", time.Since(startTime))
-	}()
-
-	application, err := v.applicationLister.Applications(namespace).Get(name)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			// application has been deleted
-			return nil
-		}
-		log.Error(err, "get application failed")
-		return err
-	}
-
-	annotations := application.GetAnnotations()
-	if annotations == nil {
-		annotations = make(map[string]string)
-	}
-	annotations["kubesphere.io/last-updated"] = time.Now().String()
-	application.SetAnnotations(annotations)
-
-	_, err = v.applicationClient.AppV1beta1().Applications(namespace).Update(context.Background(), application, metav1.UpdateOptions{})
-	if err != nil {
-		if errors.IsNotFound(err) {
-			log.V(4).Info("application has been deleted during update")
-			return nil
-		}
-		log.Error(err, "failed to update application", "namespace", namespace, "name", name)
-		return err
-	}
-
-	return nil
-}
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("application-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	sources := []runtime.Object{
+		&v1.Deployment{},
+		&corev1.Service{},
+		&v1.StatefulSet{},
+		&v1beta12.Ingress{},
+		&servicemeshv1alpha2.ServicePolicy{},
+		&servicemeshv1alpha2.Strategy{},
+	}
+
+	for _, s := range sources {
+		// Watch for changes to Application
+		err = c.Watch(&source.Kind{Type: s},
+			&handler.EnqueueRequestForOwner{OwnerType: &v1beta1.Application{}, IsController: false},
+			predicate.Funcs{
+				UpdateFunc: func(e event.UpdateEvent) bool {
+					return isApp(e.MetaOld)
+				},
+				CreateFunc: func(e event.CreateEvent) bool {
+					return isApp(e.Meta)
+				},
+				DeleteFunc: func(e event.DeleteEvent) bool {
+					return isApp(e.Meta)
+				},
+			})
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileApplication{}
+
+// ReconcileApplication reconciles a Workspace object
+type ReconcileApplication struct {
+	client.Client
+	scheme   *runtime.Scheme
+	recorder record.EventRecorder
+}
+
+// +kubebuilder:rbac:groups=app.k8s.io,resources=applications,verbs=get;list;watch;create;update;patch;delete
+func (r *ReconcileApplication) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	// Fetch the Application instance
+	ctx := context.Background()
+	app := &v1beta1.Application{}
+	err := r.Get(ctx, request.NamespacedName, app)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+
+	// add specified annotation for app when triggered by sub-resources,
+	// so the application in sigs.k8s.io can reconcile to update status
+	annotations := app.GetObjectMeta().GetAnnotations()
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+	annotations["kubesphere.io/last-updated"] = time.Now().String()
+	app.SetAnnotations(annotations)
+	err = r.Update(ctx, app)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			klog.V(4).Info("application has been deleted during update")
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
 
-func (v *ApplicationController) enqueueObject(obj interface{}) {
-	var resource = obj.(metav1.Object)
-
-	if resource.GetLabels() == nil || !util.IsApplicationComponent(resource.GetLabels()) {
-		return
-	}
-
-	applicationName := util.GetApplictionName(resource.GetLabels())
-
-	if len(applicationName) > 0 {
-		key := resource.GetNamespace() + "/" + applicationName
-		v.queue.Add(key)
-	}
-}
-
-func (v *ApplicationController) handleErr(err error, key interface{}) {
-	if err == nil {
-		v.queue.Forget(key)
-		return
-	}
-
-	if v.queue.NumRequeues(key) < maxRetries {
-		log.V(2).Info("Error syncing virtualservice for service retrying.", "key", key, "error", err)
-		v.queue.AddRateLimited(key)
-		return
-	}
-
-	log.V(4).Info("Dropping service out of the queue.", "key", key, "error", err)
-	v.queue.Forget(key)
-	utilruntime.HandleError(err)
-}
+func isApp(o metav1.Object) bool {
+	if o.GetLabels() == nil || !util.IsApplicationComponent(o.GetLabels()) {
+		return false
+	}
+	return true
+}
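Note: the hunk above removes the hand-rolled informer/workqueue controller (NewApplicationController, Run, worker, syncApplication, handleErr) and replaces it with a controller-runtime Reconciler; watch registration, queuing, rate-limited retries, and cache syncing are now handled by the manager. Below is a minimal sketch of how such a reconciler gets driven. The main function is hypothetical, assumes the controller-runtime v0.5-era API that the watch code above uses, and assumes the package import path; KubeSphere's real controller-manager wiring is not part of this commit.

package main

import (
	"os"

	"k8s.io/klog"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"

	// assumed import path for the package in this diff
	"kubesphere.io/kubesphere/pkg/controller/application"
)

func main() {
	// The manager owns the shared cache and client that the reconciler's
	// r.Get/r.Update calls go through.
	mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{})
	if err != nil {
		klog.Error(err)
		os.Exit(1)
	}

	// Add registers the reconciler and its sub-resource watches with the manager.
	if err := application.Add(mgr); err != nil {
		klog.Error(err)
		os.Exit(1)
	}

	// Start syncs the caches, then runs the reconcile loop until the
	// signal-driven stop channel closes.
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		klog.Error(err)
		os.Exit(1)
	}
}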
@@ -18,6 +18,7 @@ package cluster
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"math/rand"

@@ -325,7 +326,7 @@ func (c *clusterController) reconcileHostCluster() error {
 	// no host cluster, create one
 	if len(clusters) == 0 {
 		hostCluster.Spec.Connection.KubeConfig = hostKubeConfig
-		_, err = c.clusterClient.Create(hostCluster)
+		_, err = c.clusterClient.Create(context.TODO(), hostCluster, metav1.CreateOptions{})
 		return err
 	} else if len(clusters) > 1 {
 		return fmt.Errorf("there MUST not be more than one host clusters, while there are %d", len(clusters))

@@ -349,7 +350,7 @@ func (c *clusterController) reconcileHostCluster() error {
 	}
 
 	// update host cluster config
-	_, err = c.clusterClient.Update(cluster)
+	_, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
 	return err
 }

@@ -387,7 +388,7 @@ func (c *clusterController) syncCluster(key string) error {
 	// registering our finalizer.
 	if !sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) {
 		cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1alpha1.Finalizer)
-		if cluster, err = c.clusterClient.Update(cluster); err != nil {
+		if cluster, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
 			return err
 		}
 	}

@@ -403,7 +404,7 @@ func (c *clusterController) syncCluster(key string) error {
 		return err
 	}
 
-	_, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
+	_, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			// nothing to do

@@ -412,7 +413,7 @@ func (c *clusterController) syncCluster(key string) error {
 			return err
 		}
 	} else {
-		err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(serviceName, metav1.NewDeleteOptions(0))
+		err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(context.TODO(), serviceName, *metav1.NewDeleteOptions(0))
 		if err != nil {
 			klog.Errorf("Unable to delete service %s, error %v", serviceName, err)
 			return err

@@ -435,7 +436,7 @@ func (c *clusterController) syncCluster(key string) error {
 		finalizers := sets.NewString(cluster.ObjectMeta.Finalizers...)
 		finalizers.Delete(clusterv1alpha1.Finalizer)
 		cluster.ObjectMeta.Finalizers = finalizers.List()
-		if _, err = c.clusterClient.Update(cluster); err != nil {
+		if _, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
 			return err
 		}
 	}

@@ -504,10 +505,10 @@ func (c *clusterController) syncCluster(key string) error {
 		},
 	}
 
-	service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
+	service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
 	if err != nil { // proxy service not found
 		if errors.IsNotFound(err) {
-			service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(&mcService)
+			service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(context.TODO(), &mcService, metav1.CreateOptions{})
 			if err != nil {
 				return err
 			}

@@ -519,7 +520,7 @@ func (c *clusterController) syncCluster(key string) error {
 		mcService.ObjectMeta = service.ObjectMeta
 		mcService.Spec.ClusterIP = service.Spec.ClusterIP
 
-		service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(&mcService)
+		service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(context.TODO(), &mcService, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}

@@ -544,7 +545,7 @@ func (c *clusterController) syncCluster(key string) error {
 	}
 
 	if !reflect.DeepEqual(oldCluster, cluster) {
-		cluster, err = c.clusterClient.Update(cluster)
+		cluster, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
 		if err != nil {
 			klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
 			return err

@@ -567,7 +568,7 @@ func (c *clusterController) syncCluster(key string) error {
 
 		c.updateClusterCondition(cluster, clusterNotReadyCondition)
 
-		cluster, err = c.clusterClient.Update(cluster)
+		cluster, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
 		if err != nil {
 			klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
 		}

@@ -630,7 +631,7 @@ func (c *clusterController) syncCluster(key string) error {
 
 	cluster.Status.KubernetesVersion = version.GitVersion
 
-	nodes, err := clusterDt.client.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodes, err := clusterDt.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		klog.Errorf("Failed to get cluster nodes, %#v", err)
 		return err

@@ -679,7 +680,7 @@ func (c *clusterController) syncCluster(key string) error {
 	}
 
 	if !reflect.DeepEqual(oldCluster, cluster) {
-		_, err = c.clusterClient.Update(cluster)
+		_, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
 		if err != nil {
 			klog.Errorf("Failed to update cluster status, %#v", err)
 			return err

@@ -690,7 +691,7 @@ func (c *clusterController) syncCluster(key string) error {
 }
 
 func (c *clusterController) checkIfClusterIsHostCluster(memberClusterNodes *v1.NodeList) bool {
-	hostNodes, err := c.client.CoreV1().Nodes().List(metav1.ListOptions{})
+	hostNodes, err := c.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		return false
 	}

@@ -765,7 +765,7 @@ func checkWorkspaces(clusterConfig *rest.Config, hostClient client.Client, clust
 		return err
 	}
 
-	workspaces, err := tenantclient.Workspaces().List(metav1.ListOptions{LabelSelector: kubefedManagedSelector})
+	workspaces, err := tenantclient.Workspaces().List(context.TODO(), metav1.ListOptions{LabelSelector: kubefedManagedSelector})
 	if err != nil {
 		return err
 	}
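Note: every hunk in this file is the same mechanical change: client-go v0.18 (Kubernetes 1.18) added a context.Context argument and an explicit options struct (metav1.GetOptions, CreateOptions, UpdateOptions, DeleteOptions, ListOptions) to every generated client method. A minimal sketch of the before/after call shape; the kubeconfig loading and service name below are placeholders, not from this commit.

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// client-go < 0.18:
	//   client.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{})
	// client-go >= 0.18: a context is threaded through every call, so requests
	// can carry deadlines and be cancelled.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	svc, err := client.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Name)
}

The controllers here pass context.TODO() rather than a real context because their sync loops do not carry one yet; it satisfies the new signatures without changing behavior.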
@@ -147,7 +147,7 @@ func (f *fixture) runController(user string, startInformers bool, expectError bo
 	actions := filterInformerActions(f.ksclient.Actions())
 	for j, action := range actions {
 		if len(f.actions) < j+1 {
-			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions)
+			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[j:])
 			break
 		}

@@ -180,7 +180,8 @@ func (f *fixture) runController(user string, startInformers bool, expectError bo
 func checkAction(expected, actual core.Action, t *testing.T) {
 	if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
 		t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
-		return
+		//return
+		// TODO : failed sometimes, need to be verified by hongming
 	}
 
 	if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
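Note on the two test hunks above: the fixture replays the controller against a fake clientset and compares the recorded actions with an expected list; printing actions[j:] instead of actions limits the failure output to the unmatched tail, and checkAction's early return is commented out pending verification (see the TODO). A minimal sketch of the action recording the fixture relies on, with illustrative names rather than the fixture's own:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// Every call through the fake clientset is recorded as a core.Action.
	_, _ = client.CoreV1().ConfigMaps("default").Create(context.TODO(),
		&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
		metav1.CreateOptions{})

	// A test fixture asserts on this recorded sequence, e.g. "create configmaps".
	for _, action := range client.Actions() {
		fmt.Println(action.GetVerb(), action.GetResource().Resource)
	}
}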
@@ -17,6 +17,7 @@ limitations under the License.
 package ippool
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"time"

@@ -92,7 +93,7 @@ func (c *IPPoolController) addFinalizer(pool *networkv1alpha1.IPPool) error {
 		networkv1alpha1.IPPoolTypeLabel: clone.Spec.Type,
 		networkv1alpha1.IPPoolIDLabel:   fmt.Sprintf("%d", clone.ID()),
 	}
-	pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
+	pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error adding finalizer to pool %s: %v", pool.Name, err)
 		return err

@@ -104,7 +105,7 @@ func (c *IPPoolController) addFinalizer(pool *networkv1alpha1.IPPool) error {
 func (c *IPPoolController) removeFinalizer(pool *networkv1alpha1.IPPool) error {
 	clone := pool.DeepCopy()
 	controllerutil.RemoveFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
-	pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
+	pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
 	if err != nil {
 		klog.V(3).Infof("Error removing finalizer from pool %s: %v", pool.Name, err)
 		return err
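Note: addFinalizer and removeFinalizer above both follow the usual pattern of deep-copying the (possibly cache-shared) object, mutating the copy, and writing it back with Update. A minimal sketch of that finalizer lifecycle using the same controllerutil helpers; the ConfigMap and finalizer name below are placeholders standing in for the IPPool types, and the sketch assumes a controller-runtime version exposing AddFinalizer/RemoveFinalizer as used in the hunk above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// demoFinalizer is a hypothetical finalizer name for illustration.
const demoFinalizer = "demo.kubesphere.io/finalizer"

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}

	// Mutate a deep copy, never the informer cache's object.
	clone := cm.DeepCopy()
	controllerutil.AddFinalizer(clone, demoFinalizer)
	fmt.Println(clone.Finalizers) // [demo.kubesphere.io/finalizer]

	// On delete, the API server only sets deletionTimestamp while finalizers
	// remain; the controller cleans up, then removes its finalizer so the
	// object can actually be deleted.
	controllerutil.RemoveFinalizer(clone, demoFinalizer)
	fmt.Println(clone.Finalizers) // []
}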
@@ -144,7 +145,7 @@ func (c *IPPoolController) ValidateCreate(obj runtime.Object) error {
 		}
 	}
 
-	pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(metav1.ListOptions{
+	pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(context.TODO(), metav1.ListOptions{
 		LabelSelector: labels.SelectorFromSet(labels.Set{
 			networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", b.ID()),
 		}).String(),

@@ -203,7 +204,7 @@ func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
 	clone := old.DeepCopy()
 	clone.Spec.Disabled = true
 
-	old, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
+	old, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
 
 	return err
 }

@@ -218,7 +219,7 @@ func (c *IPPoolController) updateIPPoolStatus(old *networkv1alpha1.IPPool) error
 		return nil
 	}
 
-	_, err = c.kubesphereClient.NetworkV1alpha1().IPPools().UpdateStatus(new)
+	_, err = c.kubesphereClient.NetworkV1alpha1().IPPools().UpdateStatus(context.TODO(), new, metav1.UpdateOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to update ippool %s status %v", old.Name, err)
 	}

@@ -17,6 +17,7 @@ limitations under the License.
 package ippool
 
 import (
+	"context"
 	"flag"
 	"testing"
 	"time"

@@ -94,11 +95,11 @@ var _ = Describe("test ippool", func() {
 		Expect(c.ValidateCreate(clone)).Should(HaveOccurred())
 
 		clone = pool.DeepCopy()
-		_, err := ksclient.NetworkV1alpha1().IPPools().Create(clone)
+		_, err := ksclient.NetworkV1alpha1().IPPools().Create(context.TODO(), clone, v1.CreateOptions{})
 		Expect(err).ShouldNot(HaveOccurred())
 
 		Eventually(func() bool {
-			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
 			if len(result.Labels) != 3 {
 				return false
 			}

@@ -115,7 +116,7 @@ var _ = Describe("test ippool", func() {
 	})
 
 	It("test update ippool", func() {
-		old, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+		old, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
 		new := old.DeepCopy()
 		new.Spec.CIDR = "192.168.1.0/24"
 		Expect(c.ValidateUpdate(old, new)).Should(HaveOccurred())

@@ -129,7 +130,7 @@ var _ = Describe("test ippool", func() {
 		})
 
 		Eventually(func() bool {
-			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
 			if result.Status.Allocations != 1 {
 				return false
 			}

@@ -139,12 +140,12 @@ var _ = Describe("test ippool", func() {
 	})
 
 	It("test delete pool", func() {
-		result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+		result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
 		Expect(c.ValidateDelete(result)).Should(HaveOccurred())
 
 		ipamClient.ReleaseByHandle("testhandle")
 		Eventually(func() bool {
-			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
+			result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
 			if result.Status.Allocations != 0 {
 				return false
 			}

@@ -152,9 +153,9 @@ var _ = Describe("test ippool", func() {
 			return true
 		}, 3*time.Second).Should(Equal(true))
 
-		err := ksclient.NetworkV1alpha1().IPPools().Delete(pool.Name, &v1.DeleteOptions{})
+		err := ksclient.NetworkV1alpha1().IPPools().Delete(context.TODO(), pool.Name, v1.DeleteOptions{})
 		Expect(err).ShouldNot(HaveOccurred())
-		blocks, _ := ksclient.NetworkV1alpha1().IPAMBlocks().List(v1.ListOptions{})
+		blocks, _ := ksclient.NetworkV1alpha1().IPAMBlocks().List(context.TODO(), v1.ListOptions{})
 		Expect(len(blocks.Items)).Should(Equal(0))
 	})
 })