diff --git a/pkg/controller/devopsproject/devopsproject_controller.go b/pkg/controller/devopsproject/devopsproject_controller.go index 0cbf3a90c..8db408ffc 100644 --- a/pkg/controller/devopsproject/devopsproject_controller.go +++ b/pkg/controller/devopsproject/devopsproject_controller.go @@ -24,6 +24,10 @@ import ( devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha3" ) +/** + DevOps project controller is used to maintain the state of the DevOps project. +*/ + type Controller struct { client clientset.Interface kubesphereClient kubesphereclient.Interface @@ -171,7 +175,10 @@ func (c *Controller) syncHandler(key string) error { klog.Error(err, fmt.Sprintf("could not get devopsproject %s ", key)) return err } + // DeletionTimestamp.IsZero() means the DevOps project has not been deleted. if project.ObjectMeta.DeletionTimestamp.IsZero() { + // Use finalizers to sync the DevOps status when the DevOps project is deleted. + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers if !sliceutil.HasString(project.ObjectMeta.Finalizers, devopsv1alpha3.DevOpsProjectFinalizerName) { project.ObjectMeta.Finalizers = append(project.ObjectMeta.Finalizers, devopsv1alpha3.DevOpsProjectFinalizerName) _, err := c.kubesphereClient.DevopsV1alpha3().DevOpsProjects().Update(project) @@ -180,6 +187,7 @@ func (c *Controller) syncHandler(key string) error { return err } } + // Check whether the project exists; otherwise we will create it. 
_, err := c.devopsClient.GetDevOpsProject(key) if err != nil && devopsClient.GetDevOpsStatusCode(err) != http.StatusNotFound { klog.Error(err, fmt.Sprintf("failed to get project %s ", key)) @@ -192,6 +200,7 @@ func (c *Controller) syncHandler(key string) error { } } } else { + // Finalizer processing logic if sliceutil.HasString(project.ObjectMeta.Finalizers, devopsv1alpha3.DevOpsProjectFinalizerName) { _, err := c.devopsClient.GetDevOpsProject(key) if err != nil && devopsClient.GetDevOpsStatusCode(err) != http.StatusNotFound { diff --git a/pkg/controller/s2ibinary/s2ibinary_controller.go b/pkg/controller/s2ibinary/s2ibinary_controller.go index d8e155d98..53d838ec1 100644 --- a/pkg/controller/s2ibinary/s2ibinary_controller.go +++ b/pkg/controller/s2ibinary/s2ibinary_controller.go @@ -23,7 +23,11 @@ import ( devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha1" ) -type S2iBinaryController struct { +/** +s2ibinary-controller is used to handle s2ibinary's delete logic. +s2ibinary creation and file upload are provided by kubesphere/kapis. +*/ +type Controller struct { client clientset.Interface devopsClient devopsclient.Interface @@ -43,7 +47,7 @@ type S2iBinaryController struct { func NewController(client clientset.Interface, devopsclientset devopsclient.Interface, s2ibinInformer devopsinformers.S2iBinaryInformer, - s3Client s3.Interface) *S2iBinaryController { + s3Client s3.Interface) *Controller { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(func(format string, args ...interface{}) { @@ -52,7 +56,7 @@ func NewController(client clientset.Interface, broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "s2ibinary-controller"}) - v := &S2iBinaryController{ + v := &Controller{ client: client, devopsClient: devopsclientset, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), 
"s2ibinary"), @@ -83,7 +87,7 @@ func NewController(client clientset.Interface, // enqueueS2iBinary takes a Foo resource and converts it into a namespace/name // string which is then put onto the work workqueue. This method should *not* be // passed resources of any type other than S2iBinary. -func (c *S2iBinaryController) enqueueS2iBinary(obj interface{}) { +func (c *Controller) enqueueS2iBinary(obj interface{}) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { @@ -93,7 +97,7 @@ func (c *S2iBinaryController) enqueueS2iBinary(obj interface{}) { c.workqueue.Add(key) } -func (c *S2iBinaryController) processNextWorkItem() bool { +func (c *Controller) processNextWorkItem() bool { obj, shutdown := c.workqueue.Get() if shutdown { @@ -128,17 +132,17 @@ func (c *S2iBinaryController) processNextWorkItem() bool { return true } -func (c *S2iBinaryController) worker() { +func (c *Controller) worker() { for c.processNextWorkItem() { } } -func (c *S2iBinaryController) Start(stopCh <-chan struct{}) error { +func (c *Controller) Start(stopCh <-chan struct{}) error { return c.Run(1, stopCh) } -func (c *S2iBinaryController) Run(workers int, stopCh <-chan struct{}) error { +func (c *Controller) Run(workers int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() @@ -160,7 +164,7 @@ func (c *S2iBinaryController) Run(workers int, stopCh <-chan struct{}) error { // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Foo resource // with the current status of the resource. 
-func (c *S2iBinaryController) syncHandler(key string) error { +func (c *Controller) syncHandler(key string) error { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { klog.Error(err, fmt.Sprintf("could not split s2ibin meta %s ", key)) return err } @@ -205,7 +209,7 @@ func (c *S2iBinaryController) syncHandler(key string) error { return nil } -func (c *S2iBinaryController) deleteBinaryInS3(s2ibin *devopsv1alpha1.S2iBinary) error { +func (c *Controller) deleteBinaryInS3(s2ibin *devopsv1alpha1.S2iBinary) error { key := fmt.Sprintf("%s-%s", s2ibin.Namespace, s2ibin.Name) err := c.s3Client.Delete(key) diff --git a/pkg/controller/s2ibinary/s2ibinary_controller_test.go b/pkg/controller/s2ibinary/s2ibinary_controller_test.go index 50219f895..49be1f59e 100644 --- a/pkg/controller/s2ibinary/s2ibinary_controller_test.go +++ b/pkg/controller/s2ibinary/s2ibinary_controller_test.go @@ -69,7 +69,7 @@ func newDeletingS2iBinary(name string) *s2i.S2iBinary { } } -func (f *fixture) newController() (*S2iBinaryController, informers.SharedInformerFactory, *fakeS3.FakeS3) { +func (f *fixture) newController() (*Controller, informers.SharedInformerFactory, *fakeS3.FakeS3) { f.client = fake.NewSimpleClientset(f.objects...) f.kubeclient = k8sfake.NewSimpleClientset() diff --git a/pkg/controller/s2irun/s2irun_controller.go b/pkg/controller/s2irun/s2irun_controller.go index 650f612bb..34327f634 100644 --- a/pkg/controller/s2irun/s2irun_controller.go +++ b/pkg/controller/s2irun/s2irun_controller.go @@ -24,6 +24,10 @@ import ( devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha1" ) +/** + s2irun-controller is used to handle s2irun's delete logic. + s2irun creation and operation are provided by s2ioperator. +*/ type Controller struct { client clientset.Interface @@ -214,6 +218,11 @@ func (c Controller) syncHandler(key string) error { return nil } +/** + DeleteS2iBinary mainly cleans up two parts of S2iBinary + 1. 
s2ibinary that has been created for more than 24 hours but has not been used +*/ func (c Controller) DeleteS2iBinary(s2irun *devopsv1alpha1.S2iRun) error { s2iBinName := s2irun.Labels[devopsv1alpha1.S2iBinaryLabelKey] s2iBin, err := c.s2iBinaryLister.S2iBinaries(s2irun.Namespace).Get(s2iBinName)