feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
Authored by KubeSphere CI Bot on 2024-09-06 11:05:52 +08:00, committed by GitHub.
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions


@@ -0,0 +1,134 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"strings"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
categoryController = "app-category"
categoryFinalizer = "categories.application.kubesphere.io"
)
var _ reconcile.Reconciler = &AppCategoryReconciler{}
var _ kscontroller.Controller = &AppCategoryReconciler{}
type AppCategoryReconciler struct {
client.Client
}
func (r *AppCategoryReconciler) Name() string {
return categoryController
}
func (r *AppCategoryReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
return ctrl.NewControllerManagedBy(mgr).
Named(categoryController).
For(&appv2.Category{}).
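// In addition to Category objects, watch Applications: when an Application
// carries a category label, enqueue the referenced Category so its app count
// stays up to date. LabelChangedPredicate filters out updates that do not
// touch labels.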
Watches(
&appv2.Application{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request {
var requests []reconcile.Request
app := object.(*appv2.Application)
if categoryID := app.Labels[appv2.AppCategoryNameKey]; categoryID != "" {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{Name: categoryID},
})
}
return requests
}),
builder.WithPredicates(predicate.LabelChangedPredicate{}),
).
Complete(r)
}
func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(4).Infof("reconcile app category %s", req.String())
category := &appv2.Category{}
if err := r.Client.Get(ctx, req.NamespacedName, category); err != nil {
if errors.IsNotFound(err) {
if req.Name == appv2.UncategorizedCategoryID {
return reconcile.Result{}, r.ensureUncategorizedCategory()
}
// ignore exceptions caused by incorrectly adding app labels.
klog.Errorf("not found %s, check if you added the correct app category", req.String())
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
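// Ensure the cleanup finalizer is present before doing anything else, so a
// Category cannot be removed while it still owns applications.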
if !controllerutil.ContainsFinalizer(category, categoryFinalizer) {
category.ObjectMeta.Finalizers = append(category.ObjectMeta.Finalizers, categoryFinalizer)
return ctrl.Result{}, r.Update(ctx, category)
}
if !category.ObjectMeta.DeletionTimestamp.IsZero() {
// Our finalizer is present, so handle the external dependency: block deletion
// while the category still owns applications, otherwise remove the finalizer
// and update the object.
if category.Status.Total > 0 {
klog.Errorf("cannot delete app category %s: it still owns applications", req.String())
return reconcile.Result{}, nil
}
controllerutil.RemoveFinalizer(category, categoryFinalizer)
return reconcile.Result{}, r.Update(ctx, category)
}
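// Recount the applications labeled with this category (upload repo only) and
// refresh Status.Total when it has drifted.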
apps := &appv2.ApplicationList{}
opts := client.MatchingLabels{
appv2.AppCategoryNameKey: category.Name,
appv2.RepoIDLabelKey: appv2.UploadRepoKey,
}
if err := r.List(ctx, apps, opts); err != nil {
klog.Errorf("failed to list apps: %v", err)
return ctrl.Result{}, err
}
if category.Status.Total != len(apps.Items) {
category.Status.Total = len(apps.Items)
if err := r.Status().Update(ctx, category); err != nil {
klog.Errorf("failed to update category status: %v", err)
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}
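// ensureUncategorizedCategory recreates the built-in "uncategorized" category
// if it has been removed, so apps without an explicit category always have a
// place to land.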
func (r *AppCategoryReconciler) ensureUncategorizedCategory() error {
ctg := &appv2.Category{}
err := r.Get(context.TODO(), types.NamespacedName{Name: appv2.UncategorizedCategoryID}, ctg)
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("failed to get uncategorized category: %v", err)
return err
}
ctg.Name = appv2.UncategorizedCategoryID
return r.Create(context.TODO(), ctg)
}


@@ -1,307 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"fmt"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
servicemeshv1alpha2 "kubesphere.io/api/servicemesh/v1alpha2"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
)
// ApplicationReconciler reconciles a Application object
type ApplicationReconciler struct {
client.Client
Mapper meta.RESTMapper
Scheme *runtime.Scheme
ApplicationSelector labels.Selector
}
func (r *ApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var app appv1beta1.Application
err := r.Get(context.Background(), req.NamespacedName, &app)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
// If a label selector was given, only reconcile applications whose labels or
// annotations match it.
if !r.ApplicationSelector.Empty() {
if !r.ApplicationSelector.Matches(labels.Set(app.Labels)) &&
!r.ApplicationSelector.Matches(labels.Set(app.Annotations)) {
return ctrl.Result{}, err
}
}
// Application is in the process of being deleted, so no need to do anything.
if app.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
resources, errs := r.updateComponents(context.Background(), &app)
newApplicationStatus := r.getNewApplicationStatus(context.Background(), &app, resources, &errs)
newApplicationStatus.ObservedGeneration = app.Generation
if equality.Semantic.DeepEqual(newApplicationStatus, &app.Status) {
return ctrl.Result{}, nil
}
err = r.updateApplicationStatus(context.Background(), req.NamespacedName, newApplicationStatus)
return ctrl.Result{}, err
}
func (r *ApplicationReconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
var errs []error
resources := r.fetchComponentListResources(ctx, app.Spec.ComponentGroupKinds, app.Spec.Selector, app.Namespace, &errs)
if app.Spec.AddOwnerRef {
ownerRef := metav1.NewControllerRef(app, appv1beta1.GroupVersion.WithKind("Application"))
*ownerRef.Controller = false
if err := r.setOwnerRefForResources(ctx, *ownerRef, resources); err != nil {
errs = append(errs, err)
}
}
return resources, errs
}
func (r *ApplicationReconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
objectStatuses := r.objectStatuses(ctx, resources, errList)
errs := utilerrors.NewAggregate(*errList)
aggReady, countReady := aggregateReady(objectStatuses)
newApplicationStatus := app.Status.DeepCopy()
newApplicationStatus.ComponentList = appv1beta1.ComponentList{
Objects: objectStatuses,
}
newApplicationStatus.ComponentsReady = fmt.Sprintf("%d/%d", countReady, len(objectStatuses))
if errs != nil {
setReadyUnknownCondition(newApplicationStatus, "ComponentsReadyUnknown", "failed to aggregate all components' statuses, check the Error condition for details")
} else if aggReady {
setReadyCondition(newApplicationStatus, "ComponentsReady", "all components ready")
} else {
setNotReadyCondition(newApplicationStatus, "ComponentsNotReady", fmt.Sprintf("%d components not ready", len(objectStatuses)-countReady))
}
if errs != nil {
setErrorCondition(newApplicationStatus, "ErrorSeen", errs.Error())
} else {
clearErrorCondition(newApplicationStatus)
}
return newApplicationStatus
}
func (r *ApplicationReconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
var resources []*unstructured.Unstructured
if selector == nil {
klog.V(2).Info("No selector is specified")
return resources
}
for _, gk := range groupKinds {
mapping, err := r.Mapper.RESTMapping(schema.GroupKind{
Group: appv1beta1.StripVersion(gk.Group),
Kind: gk.Kind,
})
if err != nil {
klog.V(2).Info("NoMappingForGK", "gk", gk.String())
continue
}
list := &unstructured.UnstructuredList{}
list.SetGroupVersionKind(mapping.GroupVersionKind)
if err = r.Client.List(ctx, list, client.InNamespace(namespace), client.MatchingLabels(selector.MatchLabels)); err != nil {
klog.Error(err, "unable to list resources for GVK", "gvk", mapping.GroupVersionKind)
*errs = append(*errs, err)
continue
}
for _, u := range list.Items {
resource := u
resources = append(resources, &resource)
}
}
return resources
}
func (r *ApplicationReconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
for _, resource := range resources {
ownerRefs := resource.GetOwnerReferences()
ownerRefFound := false
for i, refs := range ownerRefs {
if ownerRef.Kind == refs.Kind &&
ownerRef.APIVersion == refs.APIVersion &&
ownerRef.Name == refs.Name {
ownerRefFound = true
if ownerRef.UID != refs.UID {
ownerRefs[i] = ownerRef
}
}
}
if !ownerRefFound {
ownerRefs = append(ownerRefs, ownerRef)
}
resource.SetOwnerReferences(ownerRefs)
err := r.Client.Update(ctx, resource)
if err != nil {
// We log this error, but we continue and try to set the ownerRefs on the other resources.
klog.Error(err, "ErrorSettingOwnerRef", "gvk", resource.GroupVersionKind().String(),
"namespace", resource.GetNamespace(), "name", resource.GetName())
}
}
return nil
}
func (r *ApplicationReconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
var objectStatuses []appv1beta1.ObjectStatus
for _, resource := range resources {
os := appv1beta1.ObjectStatus{
Group: resource.GroupVersionKind().Group,
Kind: resource.GetKind(),
Name: resource.GetName(),
Link: resource.GetSelfLink(),
}
s, err := status(resource)
if err != nil {
klog.Error(err, "unable to compute status for resource", "gvk", resource.GroupVersionKind().String(),
"namespace", resource.GetNamespace(), "name", resource.GetName())
*errs = append(*errs, err)
}
os.Status = s
objectStatuses = append(objectStatuses, os)
}
return objectStatuses
}
func aggregateReady(objectStatuses []appv1beta1.ObjectStatus) (bool, int) {
countReady := 0
for _, os := range objectStatuses {
if os.Status == StatusReady {
countReady++
}
}
if countReady == len(objectStatuses) {
return true, countReady
}
return false, countReady
}
func (r *ApplicationReconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
original := &appv1beta1.Application{}
if err := r.Get(ctx, nn, original); err != nil {
return err
}
original.Status = *status
if err := r.Client.Status().Update(ctx, original); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("failed to update status of Application %s/%s: %v", nn.Namespace, nn.Name, err)
}
return nil
}
func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
c, err := ctrl.NewControllerManagedBy(mgr).
Named("application-controller").
For(&appv1beta1.Application{}).Build(r)
if err != nil {
return err
}
sources := []client.Object{
&v1.Deployment{},
&corev1.Service{},
&v1.StatefulSet{},
&networkv1.Ingress{},
&servicemeshv1alpha2.ServicePolicy{},
&servicemeshv1alpha2.Strategy{},
}
for _, s := range sources {
// Watch for changes to Application
err = c.Watch(
&source.Kind{Type: s},
handler.EnqueueRequestsFromMapFunc(
func(h client.Object) []reconcile.Request {
return []reconcile.Request{{NamespacedName: types.NamespacedName{
Name: servicemesh.GetApplictionName(h.GetLabels()),
Namespace: h.GetNamespace()}}}
}),
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
return isApp(e.ObjectOld, e.ObjectNew)
},
CreateFunc: func(e event.CreateEvent) bool {
return isApp(e.Object)
},
DeleteFunc: func(e event.DeleteEvent) bool {
return isApp(e.Object)
},
})
if err != nil {
return err
}
}
return nil
}
var _ reconcile.Reconciler = &ApplicationReconciler{}
func isApp(obs ...metav1.Object) bool {
for _, o := range obs {
if o.GetLabels() != nil && servicemesh.IsAppComponent(o.GetLabels()) {
return true
}
}
return false
}


@@ -1,263 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/application/api/v1beta1"
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
)
const (
applicationName = "bookinfo"
serviceName = "productpage"
timeout = time.Second * 30
interval = time.Second * 2
)
var replicas = int32(2)
var _ = Context("Inside of a new namespace", func() {
ctx := context.TODO()
ns := SetupTest(ctx)
Describe("Application", func() {
applicationLabels := map[string]string{
"app.kubernetes.io/name": "bookinfo",
"app.kubernetes.io/version": "1",
}
BeforeEach(func() {
By("create deployment,service,application objects")
service := newService(serviceName, ns.Name, applicationLabels)
deployments := []*v1.Deployment{newDeployments(serviceName, ns.Name, applicationLabels, "v1")}
app := newApplication(applicationName, ns.Name, applicationLabels)
Expect(k8sClient.Create(ctx, service.DeepCopy())).Should(Succeed())
for i := range deployments {
deployment := deployments[i]
Expect(k8sClient.Create(ctx, deployment.DeepCopy())).Should(Succeed())
}
Expect(k8sClient.Create(ctx, app)).Should(Succeed())
})
Context("Application Controller", func() {
It("Should not reconcile application", func() {
By("update application labels")
application := &v1beta1.Application{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
Expect(err).Should(Succeed())
updateApplication := func(object interface{}) {
newApp := object.(*v1beta1.Application)
newApp.Labels["kubesphere.io/creator"] = ""
}
updated, err := updateWithRetries(k8sClient, ctx, application.Namespace, applicationName, updateApplication, 1*time.Second, 5*time.Second)
Expect(updated).Should(BeTrue())
Eventually(func() bool {
err = k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
// application status field should not be populated with selected deployments and services
return len(application.Status.ComponentList.Objects) == 0
}, timeout, interval).Should(BeTrue())
})
It("Should reconcile application successfully", func() {
By("check if application status been updated by controller")
application := &v1beta1.Application{}
Eventually(func() bool {
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
Expect(err).Should(Succeed())
// application status field should be populated by controller
return len(application.Status.ComponentList.Objects) > 0
}, timeout, interval).Should(BeTrue())
})
})
})
})
type UpdateObjectFunc func(obj interface{})
func updateWithRetries(client client.Client, ctx context.Context, namespace, name string, updateFunc UpdateObjectFunc, interval, timeout time.Duration) (bool, error) {
var updateErr error
pollErr := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
app := &v1beta1.Application{}
if err = client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, app); err != nil {
return false, err
}
updateFunc(app)
if err = client.Update(ctx, app); err == nil {
return true, nil
}
updateErr = err
return false, nil
})
if pollErr != nil {
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided update to object %q: %v", name, updateErr)
}
return false, pollErr
}
return true, nil
}
func newDeployments(deploymentName, namespace string, labels map[string]string, version string) *v1.Deployment {
labels["app"] = deploymentName
labels["version"] = version
deployment := &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", deploymentName, version),
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
},
Spec: v1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "c1",
Image: "nginx:latest",
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
ContainerPort: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
ContainerPort: 3306,
Protocol: corev1.ProtocolTCP,
},
},
},
},
},
},
},
Status: v1.DeploymentStatus{
AvailableReplicas: replicas,
ReadyReplicas: replicas,
Replicas: replicas,
},
}
return deployment
}
func newService(serviceName, namespace string, labels map[string]string) *corev1.Service {
labels["app"] = serviceName
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{
"servicemesh.kubesphere.io/enabled": "true",
},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
Port: 443,
Protocol: corev1.ProtocolTCP,
},
{
Name: "mysql",
Port: 3306,
Protocol: corev1.ProtocolTCP,
},
},
Selector: labels,
Type: corev1.ServiceTypeClusterIP,
},
Status: corev1.ServiceStatus{},
}
return svc
}
func newApplication(applicationName, namespace string, labels map[string]string) *v1beta1.Application {
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{
Name: applicationName,
Namespace: namespace,
Labels: labels,
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
},
Spec: v1beta1.ApplicationSpec{
ComponentGroupKinds: []metav1.GroupKind{
{
Group: "",
Kind: "Service",
},
{
Group: "apps",
Kind: "Deployment",
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
AddOwnerRef: true,
},
}
return app
}


@@ -1,150 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"math/rand"
"path/filepath"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/apis"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
func TestApplicationController(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Application Controller Test Suite")
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(klog.NewKlogr())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "ks-core", "crds")},
AttachControlPlaneOutput: false,
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = appv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = apis.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
// SetupTest will setup a testing environment.
// This includes:
// - creating a Namespace to be used during the test
// - starting application controller
// - stopping application controller after the test ends
//
// Call this function at the start of each of your tests.
func SetupTest(ctx context.Context) *corev1.Namespace {
var stopCh chan struct{}
ns := &corev1.Namespace{}
BeforeEach(func() {
stopCh = make(chan struct{})
*ns = corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
}
err := k8sClient.Create(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to create a test namespace")
mgr, err := ctrl.NewManager(cfg, ctrl.Options{MetricsBindAddress: "0"})
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
selector, _ := labels.Parse("app.kubernetes.io/name,!kubesphere.io/creator")
reconciler := &ApplicationReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Mapper: mgr.GetRESTMapper(),
ApplicationSelector: selector,
}
err = reconciler.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup application reconciler")
go func() {
err = mgr.Start(context.Background())
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
}()
})
AfterEach(func() {
close(stopCh)
err := k8sClient.Delete(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
})
return ns
}
func init() {
rand.Seed(time.Now().UnixNano())
}
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
func randStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}


@@ -0,0 +1,396 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"k8s.io/client-go/rest"
batchv1 "k8s.io/api/batch/v1"
"kubesphere.io/utils/helm"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"kubesphere.io/api/constants"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
helmrelease "helm.sh/helm/v3/pkg/release"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/kubesphere/pkg/simple/client/application"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
helminstallerController = "apprelease-helminstaller"
HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
)
var _ controller.Controller = &AppReleaseReconciler{}
var _ reconcile.Reconciler = &AppReleaseReconciler{}
const (
verificationAgain = 5
timeoutVerificationAgain = 600
timeoutMaxRecheck = 4
)
func (r *AppReleaseReconciler) Name() string {
return helminstallerController
}
func (r *AppReleaseReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
r.HelmExecutorOptions = mgr.HelmExecutorOptions
r.Client = mgr.GetClient()
clusterClientSet, err := clusterclient.NewClusterClientSet(mgr.GetCache())
if err != nil {
return fmt.Errorf("failed to create cluster client set: %v", err)
}
r.clusterClientSet = clusterClientSet
if r.HelmExecutorOptions == nil || r.HelmExecutorOptions.Image == "" {
return fmt.Errorf("helm executor options is nil or image is empty")
}
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).Named(helminstallerController).
Watches(
&clusterv1alpha1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.mapper),
builder.WithPredicates(ClusterDeletePredicate{}),
).
WithEventFilter(IgnoreAnnotationChangePredicate{AnnotationKey: appv2.TimeoutRecheck}).
For(&appv2.ApplicationRelease{}).Complete(r)
}
func (r *AppReleaseReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
cluster := o.(*clusterv1alpha1.Cluster)
klog.Infof("cluster %s has been deleted", cluster.Name)
apprlsList := &appv2.ApplicationReleaseList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.ClusterNameLabelKey: cluster.Name})}
if err := r.List(ctx, apprlsList, opts); err != nil {
klog.Errorf("failed to list application releases: %v", err)
return requests
}
for _, apprls := range apprlsList.Items {
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: apprls.Name}})
}
return requests
}
type AppReleaseReconciler struct {
client.Client
clusterClientSet clusterclient.Interface
HelmExecutorOptions *kscontroller.HelmExecutorOptions
ossStore s3.Interface
cmStore s3.Interface
}
func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
apprls := &appv2.ApplicationRelease{}
if err := r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
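// The TimeoutRecheck annotation tracks how many times a timed-out install has
// already been re-verified; it is bounded by timeoutMaxRecheck further below.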
timeoutRecheck := apprls.Annotations[appv2.TimeoutRecheck]
var reCheck int
if timeoutRecheck == "" {
reCheck = 0
} else {
reCheck, _ = strconv.Atoi(timeoutRecheck)
}
dstKubeConfig, runClient, err := r.getClusterInfo(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster info: %v", err)
return ctrl.Result{}, err
}
executor, err := r.getExecutor(apprls, dstKubeConfig, runClient)
if err != nil {
klog.Errorf("failed to get executor: %v", err)
return ctrl.Result{}, err
}
cluster, err := r.clusterClientSet.Get(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster: %v", err)
return ctrl.Result{}, err
}
helmKubeConfig, err := application.GetHelmKubeConfig(ctx, cluster, runClient)
if err != nil {
klog.Errorf("failed to get helm kubeconfig: %v", err)
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) || (err == nil && !cluster.DeletionTimestamp.IsZero()) {
klog.Errorf("cluster not found or deleting %s: %v", apprls.GetRlsCluster(), err)
apprls.Status.State = appv2.StatusClusterDeleted
apprls.Status.Message = fmt.Sprintf("cluster %s has been deleted", cluster.Name)
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(apprls, HelmReleaseFinalizer) && apprls.ObjectMeta.DeletionTimestamp.IsZero() {
expected := apprls.DeepCopy()
controllerutil.AddFinalizer(expected, HelmReleaseFinalizer)
klog.Infof("add finalizer for apprelease %s", apprls.Name)
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(apprls))
}
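// Deletion flow: mark the release as deleting, uninstall it from the target
// cluster, wait for the executor jobs to finish and be cleaned up, then drop
// the finalizer so the ApplicationRelease object can go away.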
if !apprls.ObjectMeta.DeletionTimestamp.IsZero() {
if apprls.Status.State != appv2.StatusDeleting {
result, err := r.removeAll(ctx, apprls, executor, helmKubeConfig)
if err != nil {
return result, err
}
}
wait, err := r.cleanJob(ctx, apprls, runClient)
if err != nil {
klog.Errorf("failed to clean job: %v", err)
return ctrl.Result{}, err
}
if wait {
klog.Infof("waiting: job for %s is still active", apprls.Name)
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
klog.Infof("job for %s has been cleaned", apprls.Name)
if err = r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
apprls.Finalizers = nil
err = r.Update(ctx, apprls)
if err != nil {
klog.Errorf("failed to remove finalizer for apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("remove finalizer for apprelease %s", apprls.Name)
return ctrl.Result{}, nil
}
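// A hash of the spec is stored in the status: an empty state means the release
// was just created, while a hash mismatch means the spec changed and the
// release must be upgraded.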
if apprls.Status.State == "" {
apprls.Status.SpecHash = apprls.HashSpec()
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusCreating)
}
if apprls.HashSpec() != apprls.Status.SpecHash {
apprls.Status.SpecHash = apprls.HashSpec()
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusUpgrading)
}
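// Once the install job has been created, poll the helm release: if it is not
// visible yet, inspect the installer job for failures; otherwise map the helm
// release status (deployed/failed/other) onto the ApplicationRelease state.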
if apprls.Status.State == appv2.StatusCreated || apprls.Status.State == appv2.StatusTimeout {
options := []helm.HelmOption{
helm.SetNamespace(apprls.GetRlsNamespace()),
helm.SetKubeconfig(dstKubeConfig),
}
release, err := executor.Get(ctx, apprls.Name, options...)
if err != nil && err.Error() == "release: not found" {
klog.Infof("helm release %s/%s not created yet, check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName)
job := &batchv1.Job{}
if err = runClient.Get(ctx, types.NamespacedName{Namespace: apprls.GetRlsNamespace(), Name: apprls.Status.InstallJobName}, job); err != nil {
if apierrors.IsNotFound(err) {
klog.Errorf("job %s not found", apprls.Status.InstallJobName)
msg := "deploy failed, job not found"
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
return ctrl.Result{}, err
}
if job.Status.Failed > 0 {
klog.Infof("install apprls %s job %s, failed times %d/%d", apprls.Name, job.Name, job.Status.Failed, *job.Spec.BackoffLimit+1)
}
if job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit {
msg := fmt.Sprintf("deploy failed, job %s has failed %d times ", apprls.Status.InstallJobName, job.Status.Failed)
return ctrl.Result{}, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
if err != nil {
msg := fmt.Sprintf("%s helm create job failed err: %v", apprls.Name, err)
err = r.updateStatus(ctx, apprls, appv2.StatusFailed, msg)
return ctrl.Result{}, err
}
switch release.Info.Status {
case helmrelease.StatusFailed:
if strings.Contains(release.Info.Description, "context deadline exceeded") && reCheck < timeoutMaxRecheck {
if apprls.Status.State != appv2.StatusTimeout {
err = r.updateStatus(ctx, apprls, appv2.StatusTimeout, "install time out")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("install timed out, will check status again after %d seconds", timeoutVerificationAgain)
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}
deployed, err := application.UpdateHelmStatus(dstKubeConfig, release)
if err != nil {
return ctrl.Result{}, err
}
apprls.Annotations[appv2.TimeoutRecheck] = strconv.Itoa(reCheck + 1)
patch, _ := json.Marshal(apprls)
err = r.Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("update recheck times %s for %s", strconv.Itoa(reCheck+1), apprls.Name)
if deployed {
err = r.updateStatus(ctx, apprls, appv2.StatusActive, "StatusActive")
if err != nil {
klog.Errorf("failed to update apprelease %s %v", apprls.Name, err)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}
err = r.updateStatus(ctx, apprls, appv2.StatusFailed, release.Info.Description)
return ctrl.Result{}, err
case helmrelease.StatusDeployed:
err = r.updateStatus(ctx, apprls, appv2.StatusActive)
return ctrl.Result{}, err
default:
klog.V(5).Infof("helm release %s/%s status %s, check again after %d seconds", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain)
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
}
if apprls.Status.State == appv2.StatusCreating || apprls.Status.State == appv2.StatusUpgrading {
return ctrl.Result{}, r.createOrUpgradeAppRelease(ctx, apprls, executor, helmKubeConfig)
}
return ctrl.Result{}, nil
}
func (r *AppReleaseReconciler) removeAll(ctx context.Context, apprls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (ct ctrl.Result, err error) {
err = r.updateStatus(ctx, apprls, appv2.StatusDeleting)
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
return ctrl.Result{}, err
}
uninstallJobName, err := r.uninstall(ctx, apprls, executor, kubeconfig)
if err != nil {
klog.Errorf("failed to uninstall helm release %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
err = r.cleanStore(ctx, apprls)
if err != nil {
klog.Errorf("failed to clean store: %v", err)
return ctrl.Result{}, err
}
klog.Infof("remove apprelease %s success", apprls.Name)
if uninstallJobName != "" {
klog.Infof("try to update uninstall apprls job name %s to apprelease %s", uninstallJobName, apprls.Name)
apprls.Status.UninstallJobName = uninstallJobName
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
return ctrl.Result{}, err
}
klog.Infof("update uninstall apprls job name %s to apprelease %s success", uninstallJobName, apprls.Name)
}
return ctrl.Result{}, nil
}
func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprls *appv2.ApplicationRelease) (*dynamic.DynamicClient, error) {
clusterClient, err := r.clusterClientSet.GetClusterClient(clusterName)
if err != nil {
klog.Errorf("failed to get cluster client: %v", err)
return nil, err
}
creator := apprls.Annotations[constants.CreatorAnnotationKey]
conf := *clusterClient.RestConfig
if creator != "" {
conf.Impersonate = rest.ImpersonationConfig{
UserName: creator,
}
}
klog.Infof("DynamicClient impersonate kubeAsUser: %s", creator)
dynamicClient, err := dynamic.NewForConfig(&conf)
return dynamicClient, err
}
func (r *AppReleaseReconciler) getClusterInfo(clusterName string) ([]byte, client.Client, error) {
cluster, err := r.clusterClientSet.Get(clusterName)
if err != nil {
return nil, nil, err
}
runtimeClient, err := r.clusterClientSet.GetRuntimeClient(clusterName)
if err != nil {
return nil, nil, err
}
return cluster.Spec.Connection.KubeConfig, runtimeClient, nil
}
func (r *AppReleaseReconciler) updateStatus(ctx context.Context, apprls *appv2.ApplicationRelease, status string, message ...string) error {
apprls.Status.State = status
if message != nil {
apprls.Status.Message = message[0]
}
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
return r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
}


@@ -0,0 +1,110 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"strings"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"k8s.io/klog/v2"
"kubesphere.io/utils/s3"
"kubesphere.io/kubesphere/pkg/simple/client/application"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
appVersionController = "appversion-controller"
)
var _ reconcile.Reconciler = &AppVersionReconciler{}
var _ kscontroller.Controller = &AppVersionReconciler{}
type AppVersionReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
}
func (r *AppVersionReconciler) Name() string {
return appVersionController
}
func (r *AppVersionReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *AppVersionReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).
Named(appVersionController).
For(&appv2.ApplicationVersion{}).
Complete(r)
}
func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
appVersion := &appv2.ApplicationVersion{}
if err := r.Client.Get(ctx, req.NamespacedName, appVersion); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
// Delete app files. This is non-critical logic; errors here do not affect the main process.
if !appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
err := r.deleteFile(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to clean file for appversion %s: %v", appVersion.Name, err)
}
}
return ctrl.Result{}, nil
}
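// deleteFile removes the packaged chart from the configmap/OSS store once an
// ApplicationVersion is deleted, unless some ApplicationRelease still
// references this version. The StoreClean finalizer is removed in a deferred
// call either way, since file cleanup is best effort.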
func (r *AppVersionReconciler) deleteFile(ctx context.Context, appVersion *appv2.ApplicationVersion) error {
defer func() {
controllerutil.RemoveFinalizer(appVersion, appv2.StoreCleanFinalizer)
if err := r.Update(ctx, appVersion); err != nil {
klog.Errorf("Failed to remove finalizer from appversion %s: %v", appVersion.Name, err)
return
}
klog.Infof("Removed finalizer from appversion %s successfully", appVersion.Name)
}()
klog.Infof("ApplicationVersion %s has been deleted, try to clean file", appVersion.Name)
id := []string{appVersion.Name}
apprls := &appv2.ApplicationReleaseList{}
err := r.Client.List(ctx, apprls, client.MatchingLabels{appv2.AppVersionIDLabelKey: appVersion.Name})
if err != nil {
klog.Errorf("Failed to list ApplicationRelease: %v", err)
return err
}
if len(apprls.Items) > 0 {
klog.Infof("ApplicationVersion %s is still in use, keep file in store", appVersion.Name)
return nil
}
err = application.FailOverDelete(r.cmStore, r.ossStore, id)
if err != nil {
klog.Errorf("Fail to delete appversion %s from store: %v", appVersion.Name, err)
return err
}
klog.Infof("Delete file %s from store successfully", appVersion.Name)
return nil
}


@@ -0,0 +1,31 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
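// ClusterDeletePredicate passes only Delete events, so the ApplicationRelease
// controller's cluster watch fires solely when a member cluster is removed.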
type ClusterDeletePredicate struct {
predicate.Funcs
}
func (ClusterDeletePredicate) Update(e event.UpdateEvent) bool {
return false
}
func (ClusterDeletePredicate) Create(_ event.CreateEvent) bool {
return false
}
func (ClusterDeletePredicate) Delete(_ event.DeleteEvent) bool {
return true
}
func (ClusterDeletePredicate) Generic(_ event.GenericEvent) bool {
return false
}


@@ -1,86 +0,0 @@
// Copyright 2022 The KubeSphere Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package application
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
)
func setReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionTrue, reason, message)
}
// NotReady - shortcut to set ready condition to false
func setNotReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionFalse, reason, message)
}
// Unknown - shortcut to set ready condition to unknown
func setReadyUnknownCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionUnknown, reason, message)
}
// setErrorCondition - shortcut to set error condition
func setErrorCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
setCondition(appStatus, appv1beta1.Error, corev1.ConditionTrue, reason, message)
}
// clearErrorCondition - shortcut to set error condition
func clearErrorCondition(appStatus *appv1beta1.ApplicationStatus) {
setCondition(appStatus, appv1beta1.Error, corev1.ConditionFalse, "NoError", "No error seen")
}
func setCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
var c *appv1beta1.Condition
for i := range appStatus.Conditions {
if appStatus.Conditions[i].Type == ctype {
c = &appStatus.Conditions[i]
}
}
if c == nil {
addCondition(appStatus, ctype, status, reason, message)
} else {
// check message ?
if c.Status == status && c.Reason == reason && c.Message == message {
return
}
now := metav1.Now()
c.LastUpdateTime = now
if c.Status != status {
c.LastTransitionTime = now
}
c.Status = status
c.Reason = reason
c.Message = message
}
}
func addCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
now := metav1.Now()
c := appv1beta1.Condition{
Type: ctype,
LastUpdateTime: now,
LastTransitionTime: now,
Status: status,
Reason: reason,
Message: message,
}
appStatus.Conditions = append(appStatus.Conditions, c)
}


@@ -0,0 +1,213 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
batchv1 "k8s.io/api/batch/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/utils/helm"
"sigs.k8s.io/controller-runtime/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)
func (r *AppReleaseReconciler) uninstall(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (jobName string, err error) {
klog.Infof("uninstall helm release %s", rls.Name)
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
options := []helm.HelmOption{
helm.SetNamespace(rls.GetRlsNamespace()),
helm.SetKubeconfig(kubeconfig),
}
if jobName, err = executor.Uninstall(ctx, rls.Name, options...); err != nil {
klog.Error(err, "failed to force delete helm release")
return jobName, err
}
klog.Infof("uninstall helm release %s succeeded, job name: %s", rls.Name, jobName)
return jobName, nil
}
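// jobStatus condenses a batch Job's status into three flags used by cleanJob:
// whether it is still active, has succeeded, or has recorded failures.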
func (r *AppReleaseReconciler) jobStatus(job *batchv1.Job) (active, completed, failed bool) {
active = job.Status.Active > 0
completed = (job.Spec.Completions != nil && job.Status.Succeeded >= *job.Spec.Completions) || job.Status.Succeeded > 0
failed = (job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit) || job.Status.Failed > 0
return
}
func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) error {
clusterName := rls.GetRlsCluster()
namespace := rls.GetRlsNamespace()
klog.Infof("begin to create or upgrade %s app release %s in cluster %s ns: %s", rls.Spec.AppType, rls.Name, clusterName, namespace)
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
options := []helm.HelmOption{
helm.SetInstall(true),
helm.SetNamespace(namespace),
helm.SetKubeAsUser(creator),
helm.SetKubeconfig(kubeconfig),
}
if rls.Spec.AppType == appv2.AppTypeHelm {
_, err := executor.Get(ctx, rls.Name, options...)
if err != nil && err.Error() == "release: not found" {
klog.Infof("release %s not found, begin to create", rls.Name)
}
if err == nil {
klog.Infof("release %s found, begin to upgrade status", rls.Name)
return r.updateStatus(ctx, rls, appv2.StatusCreated)
}
}
data, err := application.FailOverGet(r.cmStore, r.ossStore, rls.Spec.AppVersionID, r.Client, true)
if err != nil {
klog.Errorf("failed to get app version data, err: %v", err)
return err
}
options = append(options, helm.SetChartData(data))
if rls.Status.InstallJobName, err = executor.Upgrade(ctx, rls.Name, "", rls.Spec.Values, options...); err != nil {
klog.Errorf("failed to create executor job, err: %v", err)
return r.updateStatus(ctx, rls, appv2.StatusFailed, err.Error())
}
return r.updateStatus(ctx, rls, appv2.StatusCreated)
}
func (r *AppReleaseReconciler) getExecutor(apprls *appv2.ApplicationRelease, kubeConfig []byte, runClient client.Client) (executor helm.Executor, err error) {
if apprls.Spec.AppType == appv2.AppTypeHelm {
return r.getHelmExecutor(apprls, kubeConfig)
}
return r.getYamlInstaller(runClient, apprls)
}
func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls *appv2.ApplicationRelease) (executor helm.Executor, err error) {
dynamicClient, err := r.getClusterDynamicClient(apprls.GetRlsCluster(), apprls)
if err != nil {
klog.Errorf("failed to get dynamic client: %v", err)
return nil, err
}
jsonList, err := application.ReadYaml(apprls.Spec.Values)
if err != nil {
klog.Errorf("failed to read yaml: %v", err)
return nil, err
}
var gvrListInfo []application.InsInfo
for _, i := range jsonList {
gvr, utd, err := application.GetInfoFromBytes(i, runClient.RESTMapper())
if err != nil {
klog.Errorf("failed to get info from bytes: %v", err)
return nil, err
}
ins := application.InsInfo{
GroupVersionResource: gvr,
Name: utd.GetName(),
Namespace: utd.GetNamespace(),
}
gvrListInfo = append(gvrListInfo, ins)
}
return application.YamlInstaller{
Mapper: runClient.RESTMapper(),
DynamicCli: dynamicClient,
GvrListInfo: gvrListInfo,
Namespace: apprls.GetRlsNamespace(),
}, nil
}
func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease, kubeconfig []byte) (executor helm.Executor, err error) {
executorOptions := []helm.ExecutorOption{
helm.SetExecutorKubeConfig(kubeconfig),
helm.SetExecutorNamespace(apprls.GetRlsNamespace()),
helm.SetExecutorImage(r.HelmExecutorOptions.Image),
helm.SetExecutorBackoffLimit(0),
helm.SetExecutorLabels(labels.Set{
appv2.AppReleaseReferenceLabelKey: apprls.Name,
constants.KubeSphereManagedLabel: "true",
}),
helm.SetTTLSecondsAfterFinished(r.HelmExecutorOptions.JobTTLAfterFinished),
}
executor, err = helm.NewExecutor(executorOptions...)
if err != nil {
klog.Errorf("failed to create helm executor: %v", err)
return nil, err
}
return executor, err
}
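// cleanJob removes the helm executor jobs that belong to this release. It
// returns wait=true while any job is still active (or in an unknown state) so
// the caller requeues instead of removing the finalizer too early.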
func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client) (wait bool, err error) {
jobs := &batchv1.JobList{}
opts := []client.ListOption{client.InNamespace(apprls.GetRlsNamespace()), client.MatchingLabels{appv2.AppReleaseReferenceLabelKey: apprls.Name}}
err = runClient.List(ctx, jobs, opts...)
if err != nil {
klog.Errorf("failed to list job for %s: %v", apprls.Name, err)
return false, err
}
if len(jobs.Items) == 0 {
klog.Infof("cluster: %s namespace: %s no job found for %s", apprls.GetRlsCluster(), apprls.GetRlsNamespace(), apprls.Name)
return false, nil
}
klog.Infof("found %d jobs for %s", len(jobs.Items), apprls.Name)
for _, job := range jobs.Items {
klog.Infof("begin to clean job %s/%s", job.Namespace, job.Name)
jobActive, jobCompleted, failed := r.jobStatus(&job)
if jobActive {
klog.Infof("job %s is still active", job.Name)
return true, nil
}
if jobCompleted || failed {
deletePolicy := metav1.DeletePropagationBackground
opt := client.DeleteOptions{PropagationPolicy: &deletePolicy}
err = runClient.Delete(ctx, &job, &opt)
if err != nil {
klog.Errorf("failed to delete job %s: %v", job.Name, err)
return false, err
}
klog.Infof("job %s has been deleted", job.Name)
} else {
klog.Infof("job %s status unknown, waiting for next reconcile: %v", job.Name, job.Status)
return true, nil
}
}
klog.Infof("all jobs have been deleted")
return false, nil
}
func (r *AppReleaseReconciler) cleanStore(ctx context.Context, apprls *appv2.ApplicationRelease) (err error) {
name := apprls.Labels[appv2.AppVersionIDLabelKey]
appVersion := &appv2.ApplicationVersion{}
err = r.Get(ctx, client.ObjectKey{Name: name}, appVersion)
if apierrors.IsNotFound(err) {
klog.Infof("appVersion %s has been deleted, cleanup file in oss", name)
if err = application.FailOverDelete(r.cmStore, r.ossStore, []string{name}); err != nil {
klog.Warningf("failed to cleanup file in oss: %v", err)
}
return nil
}
if err != nil {
return err
}
klog.Infof("appVersion %s still exists, no need to cleanup file in oss", name)
return nil
}


@@ -0,0 +1,266 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
helmrepo "helm.sh/helm/v3/pkg/repo"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)
const helmRepoController = "helmrepo-controller"
var _ reconcile.Reconciler = &RepoReconciler{}
var _ kscontroller.Controller = &RepoReconciler{}
type RepoReconciler struct {
recorder record.EventRecorder
client.Client
ossStore s3.Interface
cmStore s3.Interface
}
func (r *RepoReconciler) Name() string {
return helmRepoController
}
func (r *RepoReconciler) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
func (r *RepoReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.recorder = mgr.GetEventRecorderFor(helmRepoController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&appv2.Repo{}).
Complete(r)
}
func (r *RepoReconciler) UpdateStatus(ctx context.Context, helmRepo *appv2.Repo) error {
newRepo := &appv2.Repo{}
newRepo.Name = helmRepo.Name
newRepo.Status.State = helmRepo.Status.State
newRepo.Status.LastUpdateTime = metav1.Now()
patch, _ := json.Marshal(newRepo)
err := r.Status().Patch(ctx, newRepo, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return err
}
klog.Infof("update status successfully, repo: %s", helmRepo.GetName())
return nil
}
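// noNeedSync decides whether the repo index should be fetched on this
// reconcile: a SyncPeriod of 0 turns syncing off entirely, and a successful
// sync within the configured period is not repeated.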
func (r *RepoReconciler) noNeedSync(ctx context.Context, helmRepo *appv2.Repo) (bool, error) {
if helmRepo.Spec.SyncPeriod == 0 {
if helmRepo.Status.State != appv2.StatusNosync {
helmRepo.Status.State = appv2.StatusNosync
klog.Infof("no sync when SyncPeriod=0, repo: %s", helmRepo.GetName())
if err := r.UpdateStatus(ctx, helmRepo); err != nil {
klog.Errorf("update status failed, error: %s", err)
return false, err
}
}
klog.Infof("no sync when SyncPeriod=0, repo: %s", helmRepo.GetName())
return true, nil
}
passed := time.Since(helmRepo.Status.LastUpdateTime.Time).Seconds()
if helmRepo.Status.State == appv2.StatusSuccessful && passed < float64(helmRepo.Spec.SyncPeriod) {
klog.Infof("last sync time is %s, passed %f, no need to sync, repo: %s", helmRepo.Status.LastUpdateTime, passed, helmRepo.GetName())
return true, nil
}
return false, nil
}
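// filterVersions deduplicates chart versions by version string, keeping the
// most recently created entry, and returns them sorted newest first.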
func filterVersions(versions []*helmrepo.ChartVersion) []*helmrepo.ChartVersion {
versionMap := make(map[string]*helmrepo.ChartVersion)
for _, v := range versions {
if existing, found := versionMap[v.Version]; found {
if v.Created.After(existing.Created) {
versionMap[v.Version] = v
}
} else {
versionMap[v.Version] = v
}
}
result := make([]*helmrepo.ChartVersion, 0, len(versionMap))
for _, v := range versionMap {
result = append(result, v)
}
sort.Slice(result, func(i, j int) bool {
return result[i].Created.After(result[j].Created)
})
return result
}
func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
helmRepo := &appv2.Repo{}
if err := r.Client.Get(ctx, request.NamespacedName, helmRepo); err != nil {
klog.Errorf("get helm repo failed, error: %s", err)
return reconcile.Result{}, client.IgnoreNotFound(err)
}
requeueAfter := time.Duration(helmRepo.Spec.SyncPeriod) * time.Second
noSync, err := r.noNeedSync(ctx, helmRepo)
if err != nil {
return reconcile.Result{}, err
}
if noSync {
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
helmRepo.Status.State = appv2.StatusSyncing
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return reconcile.Result{}, err
}
index, err := application.LoadRepoIndex(helmRepo.Spec.Url, helmRepo.Spec.Credential)
if err != nil {
klog.Errorf("load index failed, repo: %s, url: %s, err: %s", helmRepo.GetName(), helmRepo.Spec.Url, err)
return reconcile.Result{}, err
}
for appName, versions := range index.Entries {
if len(versions) == 0 {
klog.Infof("no version found for %s", appName)
continue
}
versions = filterVersions(versions)
if len(versions) > appv2.MaxNumOfVersions {
versions = versions[:appv2.MaxNumOfVersions]
}
vRequests, err := repoParseRequest(r.Client, versions, helmRepo, appName)
if err != nil {
klog.Errorf("parse request failed, error: %s", err)
return reconcile.Result{}, err
}
klog.Infof("found %d/%d versions for %s need to upgrade", len(vRequests), len(versions), appName)
if len(vRequests) == 0 {
continue
}
own := metav1.OwnerReference{
APIVersion: appv2.SchemeGroupVersion.String(),
Kind: "Repo",
Name: helmRepo.Name,
UID: helmRepo.UID,
}
if err = application.CreateOrUpdateApp(r.Client, vRequests, r.cmStore, r.ossStore, own); err != nil {
klog.Errorf("create or update app failed, error: %s", err)
return reconcile.Result{}, err
}
}
helmRepo.Status.State = appv2.StatusSuccessful
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
return reconcile.Result{}, err
}
r.recorder.Eventf(helmRepo, corev1.EventTypeNormal, "Synced", "HelmRepo %s synced successfully", helmRepo.GetName())
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
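// repoParseRequest compares the chart versions from the repo index with the
// ApplicationVersions already recorded for this repo and returns an AppRequest
// for every version whose digest differs, i.e. every version that still needs
// to be created or updated. Relative chart URLs are resolved against the repo URL.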
func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string) (result []application.AppRequest, err error) {
appVersionList := &appv2.ApplicationVersionList{}
opts := client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{appv2.RepoIDLabelKey: helmRepo.Name}),
}
err = cli.List(context.Background(), appVersionList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
return nil, err
}
appVersionDigestMap := make(map[string]string)
for _, i := range appVersionList.Items {
key := fmt.Sprintf("%s-%s", i.GetLabels()[appv2.AppIDLabelKey], i.Spec.VersionName)
appVersionDigestMap[key] = i.Spec.Digest
}
for _, ver := range versions {
ver.Version = application.FormatVersion(ver.Version)
shortName := application.GenerateShortNameMD5Hash(ver.Name)
key := fmt.Sprintf("%s-%s-%s", helmRepo.Name, shortName, ver.Version)
dig := appVersionDigestMap[key]
if dig == ver.Digest {
continue
}
klog.Infof("digest mismatch, key: %s, stored digest: %s, index digest: %s", key, dig, ver.Digest)
vRequest := application.AppRequest{
RepoName: helmRepo.Name,
VersionName: ver.Version,
AppName: fmt.Sprintf("%s-%s", helmRepo.Name, shortName),
AliasName: appName,
OriginalName: appName,
AppHome: ver.Home,
Icon: ver.Icon,
Digest: ver.Digest,
Description: ver.Description,
Abstraction: ver.Description,
Maintainers: application.GetMaintainers(ver.Maintainers),
AppType: appv2.AppTypeHelm,
Workspace: helmRepo.GetWorkspace(),
Credential: helmRepo.Spec.Credential,
FromRepo: true,
}
if len(ver.URLs) == 0 {
klog.Warningf("no download url found for chart %s version %s, skip", ver.Name, ver.Version)
continue
}
url := ver.URLs[0]
// a relative chart url (no scheme) must be joined with the repo url
isAbsolute := false
for _, scheme := range []string{"https://", "http://", "s3://"} {
if strings.HasPrefix(url, scheme) {
isAbsolute = true
break
}
}
if !isAbsolute {
url = strings.TrimSuffix(helmRepo.Spec.Url, "/") + "/" + url
}
vRequest.PullUrl = url
result = append(result, vRequest)
}
return result, nil
}

View File

@@ -0,0 +1,31 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package application
import (
"sigs.k8s.io/controller-runtime/pkg/event"
)
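// IgnoreAnnotationChangePredicate drops update events in which the value of
// the configured annotation key differs between the old and the new object;
// create, delete and generic events always pass through. A minimal sketch of
// how it could be wired into a controller (the annotation key below is only an
// illustrative example):
//
// ctrl.NewControllerManagedBy(mgr).
// For(&appv2.Application{}, builder.WithPredicates(
// IgnoreAnnotationChangePredicate{AnnotationKey: "example.kubesphere.io/last-synced"},
// )).
// Complete(r)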
type IgnoreAnnotationChangePredicate struct {
AnnotationKey string
}
func (p IgnoreAnnotationChangePredicate) Create(e event.CreateEvent) bool {
return true
}
func (p IgnoreAnnotationChangePredicate) Delete(e event.DeleteEvent) bool {
return true
}
func (p IgnoreAnnotationChangePredicate) Update(e event.UpdateEvent) bool {
return e.ObjectOld.GetAnnotations()[p.AnnotationKey] == e.ObjectNew.GetAnnotations()[p.AnnotationKey]
}
func (p IgnoreAnnotationChangePredicate) Generic(e event.GenericEvent) bool {
return true
}

View File

@@ -1,301 +0,0 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package application
import (
"strings"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
// Constants defining the coarse-grained component status values
const (
StatusReady = "Ready"
StatusInProgress = "InProgress"
StatusUnknown = "Unknown"
StatusDisabled = "Disabled"
)
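// status maps a workload object to one of the coarse-grained states above,
// dispatching on its GroupKind and falling back to standard status conditions
// for kinds without a dedicated handler.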
func status(u *unstructured.Unstructured) (string, error) {
gk := u.GroupVersionKind().GroupKind()
switch gk.String() {
case "StatefulSet.apps":
return stsStatus(u)
case "Deployment.apps":
return deploymentStatus(u)
case "ReplicaSet.apps":
return replicasetStatus(u)
case "DaemonSet.apps":
return daemonsetStatus(u)
case "PersistentVolumeClaim":
return pvcStatus(u)
case "Service":
return serviceStatus(u)
case "Pod":
return podStatus(u)
case "PodDisruptionBudget.policy":
return pdbStatus(u)
case "ReplicationController":
return replicationControllerStatus(u)
case "Job.batch":
return jobStatus(u)
default:
return statusFromStandardConditions(u)
}
}
// Status from standard conditions
func statusFromStandardConditions(u *unstructured.Unstructured) (string, error) {
condition := StatusReady
// Check Ready condition
_, cs, found, err := getConditionOfType(u, StatusReady)
if err != nil {
return StatusUnknown, err
}
if found && cs == corev1.ConditionFalse {
condition = StatusInProgress
}
// Check InProgress condition
_, cs, found, err = getConditionOfType(u, StatusInProgress)
if err != nil {
return StatusUnknown, err
}
if found && cs == corev1.ConditionTrue {
condition = StatusInProgress
}
return condition, nil
}
// Statefulset
func stsStatus(u *unstructured.Unstructured) (string, error) {
sts := &appsv1.StatefulSet{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, sts); err != nil {
return StatusUnknown, err
}
if sts.Status.ObservedGeneration == sts.Generation &&
sts.Status.Replicas == *sts.Spec.Replicas &&
sts.Status.ReadyReplicas == *sts.Spec.Replicas &&
sts.Status.CurrentReplicas == *sts.Spec.Replicas {
return StatusReady, nil
}
return StatusInProgress, nil
}
// Deployment
func deploymentStatus(u *unstructured.Unstructured) (string, error) {
deployment := &appsv1.Deployment{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, deployment); err != nil {
return StatusUnknown, err
}
replicaFailure := false
progressing := false
available := false
for _, condition := range deployment.Status.Conditions {
switch condition.Type {
case appsv1.DeploymentProgressing:
if condition.Status == corev1.ConditionTrue && condition.Reason == "NewReplicaSetAvailable" {
progressing = true
}
case appsv1.DeploymentAvailable:
if condition.Status == corev1.ConditionTrue {
available = true
}
case appsv1.DeploymentReplicaFailure:
if condition.Status == corev1.ConditionTrue {
replicaFailure = true
}
}
}
if deployment.Status.ObservedGeneration == deployment.Generation &&
deployment.Status.Replicas == *deployment.Spec.Replicas &&
deployment.Status.ReadyReplicas == *deployment.Spec.Replicas &&
deployment.Status.AvailableReplicas == *deployment.Spec.Replicas &&
len(deployment.Status.Conditions) > 0 &&
(progressing || available) && !replicaFailure {
return StatusReady, nil
}
return StatusInProgress, nil
}
// Replicaset
func replicasetStatus(u *unstructured.Unstructured) (string, error) {
rs := &appsv1.ReplicaSet{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rs); err != nil {
return StatusUnknown, err
}
replicaFailure := false
for _, condition := range rs.Status.Conditions {
switch condition.Type {
case appsv1.ReplicaSetReplicaFailure:
if condition.Status == corev1.ConditionTrue {
replicaFailure = true
}
}
}
if rs.Status.ObservedGeneration == rs.Generation &&
rs.Status.Replicas == *rs.Spec.Replicas &&
rs.Status.ReadyReplicas == *rs.Spec.Replicas &&
rs.Status.AvailableReplicas == *rs.Spec.Replicas && !replicaFailure {
return StatusReady, nil
}
return StatusInProgress, nil
}
// Daemonset
func daemonsetStatus(u *unstructured.Unstructured) (string, error) {
ds := &appsv1.DaemonSet{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, ds); err != nil {
return StatusUnknown, err
}
if ds.Status.ObservedGeneration == ds.Generation &&
ds.Status.DesiredNumberScheduled == ds.Status.NumberAvailable &&
ds.Status.DesiredNumberScheduled == ds.Status.NumberReady {
return StatusReady, nil
}
return StatusInProgress, nil
}
// PVC
func pvcStatus(u *unstructured.Unstructured) (string, error) {
pvc := &corev1.PersistentVolumeClaim{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pvc); err != nil {
return StatusUnknown, err
}
if pvc.Status.Phase == corev1.ClaimBound {
return StatusReady, nil
}
return StatusInProgress, nil
}
// Service
func serviceStatus(u *unstructured.Unstructured) (string, error) {
service := &corev1.Service{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, service); err != nil {
return StatusUnknown, err
}
stype := service.Spec.Type
if stype == corev1.ServiceTypeClusterIP || stype == corev1.ServiceTypeNodePort || stype == corev1.ServiceTypeExternalName ||
(stype == corev1.ServiceTypeLoadBalancer && isEmpty(service.Spec.ClusterIP) &&
len(service.Status.LoadBalancer.Ingress) > 0 && !hasEmptyIngressIP(service.Status.LoadBalancer.Ingress)) {
return StatusReady, nil
}
return StatusInProgress, nil
}
// Pod
func podStatus(u *unstructured.Unstructured) (string, error) {
pod := &corev1.Pod{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pod); err != nil {
return StatusUnknown, err
}
for _, condition := range pod.Status.Conditions {
if condition.Type == corev1.PodReady && (condition.Reason == "PodCompleted" || condition.Status == corev1.ConditionTrue) {
return StatusReady, nil
}
}
return StatusInProgress, nil
}
// PodDisruptionBudget
func pdbStatus(u *unstructured.Unstructured) (string, error) {
pdb := &policyv1beta1.PodDisruptionBudget{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pdb); err != nil {
return StatusUnknown, err
}
if pdb.Status.ObservedGeneration == pdb.Generation &&
pdb.Status.CurrentHealthy >= pdb.Status.DesiredHealthy {
return StatusReady, nil
}
return StatusInProgress, nil
}
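// ReplicationController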
func replicationControllerStatus(u *unstructured.Unstructured) (string, error) {
rc := &corev1.ReplicationController{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rc); err != nil {
return StatusUnknown, err
}
if rc.Status.ObservedGeneration == rc.Generation &&
rc.Status.Replicas == *rc.Spec.Replicas &&
rc.Status.ReadyReplicas == *rc.Spec.Replicas &&
rc.Status.AvailableReplicas == *rc.Spec.Replicas {
return StatusReady, nil
}
return StatusInProgress, nil
}
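// Job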
func jobStatus(u *unstructured.Unstructured) (string, error) {
job := &batchv1.Job{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, job); err != nil {
return StatusUnknown, err
}
if job.Status.StartTime == nil {
return StatusInProgress, nil
}
return StatusReady, nil
}
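// hasEmptyIngressIP reports whether any load balancer ingress entry has an empty IP.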
func hasEmptyIngressIP(ingress []corev1.LoadBalancerIngress) bool {
for _, i := range ingress {
if isEmpty(i.IP) {
return true
}
}
return false
}
func isEmpty(s string) bool {
return len(strings.TrimSpace(s)) == 0
}
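// getConditionOfType returns the reason and status of the condition with the
// given type from status.conditions, along with whether such a condition exists.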
func getConditionOfType(u *unstructured.Unstructured, conditionType string) (string, corev1.ConditionStatus, bool, error) {
conditions, found, err := unstructured.NestedSlice(u.Object, "status", "conditions")
if err != nil || !found {
return "", corev1.ConditionFalse, false, err
}
for _, c := range conditions {
condition, ok := c.(map[string]interface{})
if !ok {
continue
}
t, found := condition["type"]
if !found {
continue
}
condType, ok := t.(string)
if !ok {
continue
}
if condType == conditionType {
reason, _ := condition["reason"].(string)
conditionStatus, _ := condition["status"].(string)
return reason, corev1.ConditionStatus(conditionStatus), true, nil
}
}
}
return "", corev1.ConditionFalse, false, nil
}