feat: kubesphere 4.0 (#6115)
* feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> * feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> --------- Signed-off-by: ci-bot <ci-bot@kubesphere.io> Co-authored-by: ks-ci-bot <ks-ci-bot@example.com> Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
committed by
GitHub
parent
b5015ec7b9
commit
447a51f08b
72
pkg/controller/k8sapplication/condition.go
Normal file
72
pkg/controller/k8sapplication/condition.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package k8sapplication
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
)
|
||||
|
||||
// setReadyCondition - shortcut to set the Ready condition to True.
func setReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
	setCondition(appStatus, appv1beta1.Ready, corev1.ConditionTrue, reason, message)
}

// setNotReadyCondition - shortcut to set the Ready condition to False.
func setNotReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
	setCondition(appStatus, appv1beta1.Ready, corev1.ConditionFalse, reason, message)
}

// setReadyUnknownCondition - shortcut to set the Ready condition to Unknown.
func setReadyUnknownCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
	setCondition(appStatus, appv1beta1.Ready, corev1.ConditionUnknown, reason, message)
}

// setErrorCondition - shortcut to set the Error condition to True.
func setErrorCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
	setCondition(appStatus, appv1beta1.Error, corev1.ConditionTrue, reason, message)
}

// clearErrorCondition - shortcut to set the Error condition to False with a
// fixed "NoError" reason/message.
func clearErrorCondition(appStatus *appv1beta1.ApplicationStatus) {
	setCondition(appStatus, appv1beta1.Error, corev1.ConditionFalse, "NoError", "No error seen")
}
|
||||
|
||||
func setCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
|
||||
var c *appv1beta1.Condition
|
||||
for i := range appStatus.Conditions {
|
||||
if appStatus.Conditions[i].Type == ctype {
|
||||
c = &appStatus.Conditions[i]
|
||||
}
|
||||
}
|
||||
if c == nil {
|
||||
addCondition(appStatus, ctype, status, reason, message)
|
||||
} else {
|
||||
// check message ?
|
||||
if c.Status == status && c.Reason == reason && c.Message == message {
|
||||
return
|
||||
}
|
||||
now := metav1.Now()
|
||||
c.LastUpdateTime = now
|
||||
if c.Status != status {
|
||||
c.LastTransitionTime = now
|
||||
}
|
||||
c.Status = status
|
||||
c.Reason = reason
|
||||
c.Message = message
|
||||
}
|
||||
}
|
||||
|
||||
func addCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
|
||||
now := metav1.Now()
|
||||
c := appv1beta1.Condition{
|
||||
Type: ctype,
|
||||
LastUpdateTime: now,
|
||||
LastTransitionTime: now,
|
||||
Status: status,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
}
|
||||
appStatus.Conditions = append(appStatus.Conditions, c)
|
||||
}
|
||||
310
pkg/controller/k8sapplication/k8sapplication_controller.go
Normal file
310
pkg/controller/k8sapplication/k8sapplication_controller.go
Normal file
@@ -0,0 +1,310 @@
|
||||
/*
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package k8sapplication
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkv1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
|
||||
kscontroller "kubesphere.io/kubesphere/pkg/controller"
|
||||
)
|
||||
|
||||
// controllerName is the name this controller registers under.
const controllerName = "k8sapplication"

// Compile-time interface conformance checks.
var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}

// Reconciler reconciles a Application object
type Reconciler struct {
	client.Client
	Mapper meta.RESTMapper // resolves GroupKinds declared in app specs to REST mappings
	Scheme *runtime.Scheme
	// ApplicationSelector limits reconciliation to matching applications;
	// an empty selector matches everything.
	ApplicationSelector labels.Selector
}
|
||||
|
||||
// Name returns the controller's registration name.
func (r *Reconciler) Name() string {
	return controllerName
}
|
||||
|
||||
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
|
||||
r.Client = mgr.GetClient()
|
||||
r.Mapper = mgr.GetRESTMapper()
|
||||
r.Scheme = mgr.GetScheme()
|
||||
|
||||
selector, err := labels.Parse(mgr.Options.ComposedAppOptions.AppSelector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ApplicationSelector = selector
|
||||
|
||||
c, err := ctrl.NewControllerManagedBy(mgr).
|
||||
For(&appv1beta1.Application{}).
|
||||
Build(r)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sources := []client.Object{
|
||||
&v1.Deployment{},
|
||||
&corev1.Service{},
|
||||
&v1.StatefulSet{},
|
||||
&networkv1.Ingress{},
|
||||
}
|
||||
|
||||
for _, s := range sources {
|
||||
// Watch for changes to Application
|
||||
err = c.Watch(
|
||||
source.Kind(mgr.GetCache(), s),
|
||||
handler.EnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: GetApplictionName(obj.GetLabels()),
|
||||
Namespace: obj.GetNamespace()}}}
|
||||
}),
|
||||
predicate.Funcs{
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
return isApp(e.ObjectOld, e.ObjectOld)
|
||||
},
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
return isApp(e.Object)
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
return isApp(e.Object)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
var app appv1beta1.Application
|
||||
err := r.Get(ctx, req.NamespacedName, &app)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// If label selector were given, only reconcile matched applications
|
||||
// match annotations and labels
|
||||
if !r.ApplicationSelector.Empty() {
|
||||
if !r.ApplicationSelector.Matches(labels.Set(app.Labels)) &&
|
||||
!r.ApplicationSelector.Matches(labels.Set(app.Annotations)) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Application is in the process of being deleted, so no need to do anything.
|
||||
if app.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
resources, errs := r.updateComponents(ctx, &app)
|
||||
newApplicationStatus := r.getNewApplicationStatus(ctx, &app, resources, &errs)
|
||||
|
||||
newApplicationStatus.ObservedGeneration = app.Generation
|
||||
if equality.Semantic.DeepEqual(newApplicationStatus, &app.Status) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
err = r.updateApplicationStatus(ctx, req.NamespacedName, newApplicationStatus)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
func (r *Reconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
|
||||
var errs []error
|
||||
resources := r.fetchComponentListResources(ctx, app.Spec.ComponentGroupKinds, app.Spec.Selector, app.Namespace, &errs)
|
||||
|
||||
if app.Spec.AddOwnerRef {
|
||||
ownerRef := metav1.NewControllerRef(app, appv1beta1.GroupVersion.WithKind("Application"))
|
||||
*ownerRef.Controller = false
|
||||
if err := r.setOwnerRefForResources(ctx, *ownerRef, resources); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return resources, errs
|
||||
}
|
||||
|
||||
func (r *Reconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
|
||||
objectStatuses := r.objectStatuses(ctx, resources, errList)
|
||||
errs := utilerrors.NewAggregate(*errList)
|
||||
|
||||
aggReady, countReady := aggregateReady(objectStatuses)
|
||||
|
||||
newApplicationStatus := app.Status.DeepCopy()
|
||||
newApplicationStatus.ComponentList = appv1beta1.ComponentList{
|
||||
Objects: objectStatuses,
|
||||
}
|
||||
newApplicationStatus.ComponentsReady = fmt.Sprintf("%d/%d", countReady, len(objectStatuses))
|
||||
if errs != nil {
|
||||
setReadyUnknownCondition(newApplicationStatus, "ComponentsReadyUnknown", "failed to aggregate all components' statuses, check the Error condition for details")
|
||||
} else if aggReady {
|
||||
setReadyCondition(newApplicationStatus, "ComponentsReady", "all components ready")
|
||||
} else {
|
||||
setNotReadyCondition(newApplicationStatus, "ComponentsNotReady", fmt.Sprintf("%d components not ready", len(objectStatuses)-countReady))
|
||||
}
|
||||
|
||||
if errs != nil {
|
||||
setErrorCondition(newApplicationStatus, "ErrorSeen", errs.Error())
|
||||
} else {
|
||||
clearErrorCondition(newApplicationStatus)
|
||||
}
|
||||
|
||||
return newApplicationStatus
|
||||
}
|
||||
|
||||
func (r *Reconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
|
||||
var resources []*unstructured.Unstructured
|
||||
|
||||
if selector == nil {
|
||||
klog.V(2).Info("No selector is specified")
|
||||
return resources
|
||||
}
|
||||
|
||||
for _, gk := range groupKinds {
|
||||
mapping, err := r.Mapper.RESTMapping(schema.GroupKind{
|
||||
Group: appv1beta1.StripVersion(gk.Group),
|
||||
Kind: gk.Kind,
|
||||
})
|
||||
if err != nil {
|
||||
klog.V(2).Info("NoMappingForGK", "gk", gk.String())
|
||||
continue
|
||||
}
|
||||
|
||||
list := &unstructured.UnstructuredList{}
|
||||
list.SetGroupVersionKind(mapping.GroupVersionKind)
|
||||
if err = r.Client.List(ctx, list, client.InNamespace(namespace), client.MatchingLabels(selector.MatchLabels)); err != nil {
|
||||
klog.Error(err, "unable to list resources for GVK", "gvk", mapping.GroupVersionKind)
|
||||
*errs = append(*errs, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, u := range list.Items {
|
||||
resource := u
|
||||
resources = append(resources, &resource)
|
||||
}
|
||||
}
|
||||
return resources
|
||||
}
|
||||
|
||||
func (r *Reconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
|
||||
for _, resource := range resources {
|
||||
ownerRefs := resource.GetOwnerReferences()
|
||||
ownerRefFound := false
|
||||
for i, refs := range ownerRefs {
|
||||
if ownerRef.Kind == refs.Kind &&
|
||||
ownerRef.APIVersion == refs.APIVersion &&
|
||||
ownerRef.Name == refs.Name {
|
||||
ownerRefFound = true
|
||||
if ownerRef.UID != refs.UID {
|
||||
ownerRefs[i] = ownerRef
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !ownerRefFound {
|
||||
ownerRefs = append(ownerRefs, ownerRef)
|
||||
}
|
||||
resource.SetOwnerReferences(ownerRefs)
|
||||
err := r.Client.Update(ctx, resource)
|
||||
if err != nil {
|
||||
// We log this error, but we continue and try to set the ownerRefs on the other resources.
|
||||
klog.Error(err, "ErrorSettingOwnerRef", "gvk", resource.GroupVersionKind().String(),
|
||||
"namespace", resource.GetNamespace(), "name", resource.GetName())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Reconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
|
||||
var objectStatuses []appv1beta1.ObjectStatus
|
||||
for _, resource := range resources {
|
||||
os := appv1beta1.ObjectStatus{
|
||||
Group: resource.GroupVersionKind().Group,
|
||||
Kind: resource.GetKind(),
|
||||
Name: resource.GetName(),
|
||||
Link: resource.GetSelfLink(),
|
||||
}
|
||||
s, err := status(resource)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to compute status for resource", "gvk", resource.GroupVersionKind().String(),
|
||||
"namespace", resource.GetNamespace(), "name", resource.GetName())
|
||||
*errs = append(*errs, err)
|
||||
}
|
||||
os.Status = s
|
||||
objectStatuses = append(objectStatuses, os)
|
||||
}
|
||||
return objectStatuses
|
||||
}
|
||||
|
||||
func aggregateReady(objectStatuses []appv1beta1.ObjectStatus) (bool, int) {
|
||||
countReady := 0
|
||||
for _, os := range objectStatuses {
|
||||
if os.Status == StatusReady {
|
||||
countReady++
|
||||
}
|
||||
}
|
||||
if countReady == len(objectStatuses) {
|
||||
return true, countReady
|
||||
}
|
||||
return false, countReady
|
||||
}
|
||||
|
||||
func (r *Reconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
|
||||
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
original := &appv1beta1.Application{}
|
||||
if err := r.Get(ctx, nn, original); err != nil {
|
||||
return err
|
||||
}
|
||||
original.Status = *status
|
||||
if err := r.Client.Status().Update(ctx, original); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to update status of Application %s/%s: %v", nn.Namespace, nn.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isApp(obs ...metav1.Object) bool {
|
||||
for _, o := range obs {
|
||||
if o.GetLabels() != nil && IsAppComponent(o.GetLabels()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
301
pkg/controller/k8sapplication/status.go
Normal file
301
pkg/controller/k8sapplication/status.go
Normal file
@@ -0,0 +1,301 @@
|
||||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package k8sapplication
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// Constants defining labels
|
||||
// Aggregated status values reported for an application's components.
const (
	StatusReady      = "Ready"      // component is fully available
	StatusInProgress = "InProgress" // component exists but is not yet ready
	StatusUnknown    = "Unknown"    // status could not be determined
	StatusDisabled   = "Disabled"   // not produced by any status function in this file
)
|
||||
|
||||
func status(u *unstructured.Unstructured) (string, error) {
|
||||
gk := u.GroupVersionKind().GroupKind()
|
||||
switch gk.String() {
|
||||
case "StatefulSet.apps":
|
||||
return stsStatus(u)
|
||||
case "Deployment.apps":
|
||||
return deploymentStatus(u)
|
||||
case "ReplicaSet.apps":
|
||||
return replicasetStatus(u)
|
||||
case "DaemonSet.apps":
|
||||
return daemonsetStatus(u)
|
||||
case "PersistentVolumeClaim":
|
||||
return pvcStatus(u)
|
||||
case "Service":
|
||||
return serviceStatus(u)
|
||||
case "Pod":
|
||||
return podStatus(u)
|
||||
case "PodDisruptionBudget.policy":
|
||||
return pdbStatus(u)
|
||||
case "ReplicationController":
|
||||
return replicationControllerStatus(u)
|
||||
case "Job.batch":
|
||||
return jobStatus(u)
|
||||
default:
|
||||
return statusFromStandardConditions(u)
|
||||
}
|
||||
}
|
||||
|
||||
// Status from standard conditions
|
||||
func statusFromStandardConditions(u *unstructured.Unstructured) (string, error) {
|
||||
condition := StatusReady
|
||||
|
||||
// Check Ready condition
|
||||
_, cs, found, err := getConditionOfType(u, StatusReady)
|
||||
if err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
if found && cs == corev1.ConditionFalse {
|
||||
condition = StatusInProgress
|
||||
}
|
||||
|
||||
// Check InProgress condition
|
||||
_, cs, found, err = getConditionOfType(u, StatusInProgress)
|
||||
if err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
if found && cs == corev1.ConditionTrue {
|
||||
condition = StatusInProgress
|
||||
}
|
||||
|
||||
return condition, nil
|
||||
}
|
||||
|
||||
// Statefulset
|
||||
func stsStatus(u *unstructured.Unstructured) (string, error) {
|
||||
sts := &appsv1.StatefulSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, sts); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if sts.Status.ObservedGeneration == sts.Generation &&
|
||||
sts.Status.Replicas == *sts.Spec.Replicas &&
|
||||
sts.Status.ReadyReplicas == *sts.Spec.Replicas &&
|
||||
sts.Status.CurrentReplicas == *sts.Spec.Replicas {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Deployment
|
||||
func deploymentStatus(u *unstructured.Unstructured) (string, error) {
|
||||
deployment := &appsv1.Deployment{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, deployment); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
replicaFailure := false
|
||||
progressing := false
|
||||
available := false
|
||||
|
||||
for _, condition := range deployment.Status.Conditions {
|
||||
switch condition.Type {
|
||||
case appsv1.DeploymentProgressing:
|
||||
if condition.Status == corev1.ConditionTrue && condition.Reason == "NewReplicaSetAvailable" {
|
||||
progressing = true
|
||||
}
|
||||
case appsv1.DeploymentAvailable:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
available = true
|
||||
}
|
||||
case appsv1.DeploymentReplicaFailure:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
replicaFailure = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if deployment.Status.ObservedGeneration == deployment.Generation &&
|
||||
deployment.Status.Replicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.ReadyReplicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.AvailableReplicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.Conditions != nil && len(deployment.Status.Conditions) > 0 &&
|
||||
(progressing || available) && !replicaFailure {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Replicaset
|
||||
func replicasetStatus(u *unstructured.Unstructured) (string, error) {
|
||||
rs := &appsv1.ReplicaSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rs); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
replicaFailure := false
|
||||
for _, condition := range rs.Status.Conditions {
|
||||
switch condition.Type {
|
||||
case appsv1.ReplicaSetReplicaFailure:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
replicaFailure = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if rs.Status.ObservedGeneration == rs.Generation &&
|
||||
rs.Status.Replicas == *rs.Spec.Replicas &&
|
||||
rs.Status.ReadyReplicas == *rs.Spec.Replicas &&
|
||||
rs.Status.AvailableReplicas == *rs.Spec.Replicas && !replicaFailure {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Daemonset
|
||||
func daemonsetStatus(u *unstructured.Unstructured) (string, error) {
|
||||
ds := &appsv1.DaemonSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, ds); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if ds.Status.ObservedGeneration == ds.Generation &&
|
||||
ds.Status.DesiredNumberScheduled == ds.Status.NumberAvailable &&
|
||||
ds.Status.DesiredNumberScheduled == ds.Status.NumberReady {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// PVC
|
||||
func pvcStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pvc); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if pvc.Status.Phase == corev1.ClaimBound {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Service
|
||||
// serviceStatus reports Ready for ClusterIP, NodePort, and ExternalName
// services unconditionally; LoadBalancer services must additionally satisfy
// the trailing clauses. Everything else is InProgress.
// NOTE(review): because && binds tighter than ||, the isEmpty/ingress checks
// apply only to the LoadBalancer case. Requiring an EMPTY ClusterIP while
// also requiring populated ingress IPs looks inverted (one would expect
// !isEmpty) — confirm against the intended LoadBalancer-readiness rule
// before changing.
func serviceStatus(u *unstructured.Unstructured) (string, error) {
	service := &corev1.Service{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, service); err != nil {
		return StatusUnknown, err
	}
	stype := service.Spec.Type

	if stype == corev1.ServiceTypeClusterIP || stype == corev1.ServiceTypeNodePort || stype == corev1.ServiceTypeExternalName ||
		stype == corev1.ServiceTypeLoadBalancer && isEmpty(service.Spec.ClusterIP) &&
		len(service.Status.LoadBalancer.Ingress) > 0 && !hasEmptyIngressIP(service.Status.LoadBalancer.Ingress) {
		return StatusReady, nil
	}
	return StatusInProgress, nil
}
|
||||
|
||||
// Pod
|
||||
func podStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pod := &corev1.Pod{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pod); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == corev1.PodReady && (condition.Reason == "PodCompleted" || condition.Status == corev1.ConditionTrue) {
|
||||
return StatusReady, nil
|
||||
}
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// PodDisruptionBudget
|
||||
func pdbStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pdb := &policyv1beta1.PodDisruptionBudget{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pdb); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if pdb.Status.ObservedGeneration == pdb.Generation &&
|
||||
pdb.Status.CurrentHealthy >= pdb.Status.DesiredHealthy {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
func replicationControllerStatus(u *unstructured.Unstructured) (string, error) {
|
||||
rc := &corev1.ReplicationController{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rc); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if rc.Status.ObservedGeneration == rc.Generation &&
|
||||
rc.Status.Replicas == *rc.Spec.Replicas &&
|
||||
rc.Status.ReadyReplicas == *rc.Spec.Replicas &&
|
||||
rc.Status.AvailableReplicas == *rc.Spec.Replicas {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
func jobStatus(u *unstructured.Unstructured) (string, error) {
|
||||
job := &batchv1.Job{}
|
||||
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, job); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if job.Status.StartTime == nil {
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
return StatusReady, nil
|
||||
}
|
||||
|
||||
func hasEmptyIngressIP(ingress []corev1.LoadBalancerIngress) bool {
|
||||
for _, i := range ingress {
|
||||
if isEmpty(i.IP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isEmpty reports whether s contains no non-whitespace characters.
func isEmpty(s string) bool {
	return strings.TrimSpace(s) == ""
}
|
||||
|
||||
func getConditionOfType(u *unstructured.Unstructured, conditionType string) (string, corev1.ConditionStatus, bool, error) {
|
||||
conditions, found, err := unstructured.NestedSlice(u.Object, "status", "conditions")
|
||||
if err != nil || !found {
|
||||
return "", corev1.ConditionFalse, false, err
|
||||
}
|
||||
|
||||
for _, c := range conditions {
|
||||
condition, ok := c.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
t, found := condition["type"]
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
condType, ok := t.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if condType == conditionType {
|
||||
reason := condition["reason"].(string)
|
||||
conditionStatus := condition["status"].(string)
|
||||
return reason, corev1.ConditionStatus(conditionStatus), true, nil
|
||||
}
|
||||
}
|
||||
return "", corev1.ConditionFalse, false, nil
|
||||
}
|
||||
102
pkg/controller/k8sapplication/utils.go
Normal file
102
pkg/controller/k8sapplication/utils.go
Normal file
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package k8sapplication
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// Well-known label keys used to associate workloads with applications.
const (
	AppLabel                = "app"
	VersionLabel            = "version"
	ApplicationNameLabel    = "app.kubernetes.io/name"
	ApplicationVersionLabel = "app.kubernetes.io/version"
)

// resource with these following labels considered as part of servicemesh
var ApplicationLabels = [...]string{
	ApplicationNameLabel,
	ApplicationVersionLabel,
	AppLabel,
}

// resource with these following labels considered as part of kubernetes-sigs/application
var AppLabels = [...]string{
	ApplicationNameLabel,
	ApplicationVersionLabel,
}

// TrimChars lists the separator characters stripped by NormalizeVersionName.
var TrimChars = [...]string{".", "_", "-"}
|
||||
|
||||
// normalize version names
|
||||
// strip [_.-]
|
||||
func NormalizeVersionName(version string) string {
|
||||
for _, char := range TrimChars {
|
||||
version = strings.ReplaceAll(version, char, "")
|
||||
}
|
||||
return version
|
||||
}
|
||||
|
||||
func GetApplictionName(lbs map[string]string) string {
|
||||
if name, ok := lbs[ApplicationNameLabel]; ok {
|
||||
return name
|
||||
}
|
||||
return ""
|
||||
|
||||
}
|
||||
|
||||
func GetComponentName(meta *v1.ObjectMeta) string {
|
||||
if len(meta.Labels[AppLabel]) > 0 {
|
||||
return meta.Labels[AppLabel]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetComponentVersion(meta *v1.ObjectMeta) string {
|
||||
if len(meta.Labels[VersionLabel]) > 0 {
|
||||
return meta.Labels[VersionLabel]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func ExtractApplicationLabels(meta *v1.ObjectMeta) map[string]string {
|
||||
|
||||
labels := make(map[string]string, len(ApplicationLabels))
|
||||
for _, label := range ApplicationLabels {
|
||||
if _, ok := meta.Labels[label]; !ok {
|
||||
return nil
|
||||
} else {
|
||||
labels[label] = meta.Labels[label]
|
||||
}
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
func IsApplicationComponent(lbs map[string]string) bool {
|
||||
|
||||
for _, label := range ApplicationLabels {
|
||||
if _, ok := lbs[label]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Whether it belongs to kubernetes-sigs/application or not
|
||||
func IsAppComponent(lbs map[string]string) bool {
|
||||
|
||||
for _, label := range AppLabels {
|
||||
if _, ok := lbs[label]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
Reference in New Issue
Block a user