application controller will only reconcile applications matched with given label selector
Signed-off-by: Jeff <jeffzhang@yunify.com>
This commit is contained in:
@@ -18,45 +18,239 @@ package application
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1beta12 "k8s.io/api/networking/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog"
|
||||
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
|
||||
"sigs.k8s.io/application/api/v1beta1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Add creates a new Application Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
|
||||
// and Start it when the Manager is Started.
|
||||
func Add(mgr manager.Manager) error {
|
||||
return add(mgr, newReconciler(mgr))
|
||||
// ApplicationReconciler reconciles a Application object
|
||||
type ApplicationReconciler struct {
|
||||
client.Client
|
||||
Mapper meta.RESTMapper
|
||||
Scheme *runtime.Scheme
|
||||
ApplicationSelector labels.Selector //
|
||||
}
|
||||
|
||||
// newReconciler returns a new reconcile.Reconciler
|
||||
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
|
||||
return &ReconcileApplication{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
|
||||
recorder: mgr.GetEventRecorderFor("application-controller")}
|
||||
func (r *ApplicationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
var app appv1beta1.Application
|
||||
err := r.Get(context.Background(), req.NamespacedName, &app)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// If label selector were given, only reconcile matched applications
|
||||
// match annotations and labels
|
||||
if !r.ApplicationSelector.Empty() {
|
||||
if !r.ApplicationSelector.Matches(labels.Set(app.Labels)) &&
|
||||
!r.ApplicationSelector.Matches(labels.Set(app.Annotations)) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Application is in the process of being deleted, so no need to do anything.
|
||||
if app.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
resources, errs := r.updateComponents(context.Background(), &app)
|
||||
newApplicationStatus := r.getNewApplicationStatus(context.Background(), &app, resources, &errs)
|
||||
|
||||
newApplicationStatus.ObservedGeneration = app.Generation
|
||||
if equality.Semantic.DeepEqual(newApplicationStatus, &app.Status) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
err = r.updateApplicationStatus(context.Background(), req.NamespacedName, newApplicationStatus)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
|
||||
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
// Create a new controller
|
||||
c, err := controller.New("application-controller", mgr, controller.Options{Reconciler: r})
|
||||
func (r *ApplicationReconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
|
||||
var errs []error
|
||||
resources := r.fetchComponentListResources(ctx, app.Spec.ComponentGroupKinds, app.Spec.Selector, app.Namespace, &errs)
|
||||
|
||||
if app.Spec.AddOwnerRef {
|
||||
ownerRef := metav1.NewControllerRef(app, appv1beta1.GroupVersion.WithKind("Application"))
|
||||
*ownerRef.Controller = false
|
||||
if err := r.setOwnerRefForResources(ctx, *ownerRef, resources); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return resources, errs
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
|
||||
objectStatuses := r.objectStatuses(ctx, resources, errList)
|
||||
errs := utilerrors.NewAggregate(*errList)
|
||||
|
||||
aggReady, countReady := aggregateReady(objectStatuses)
|
||||
|
||||
newApplicationStatus := app.Status.DeepCopy()
|
||||
newApplicationStatus.ComponentList = appv1beta1.ComponentList{
|
||||
Objects: objectStatuses,
|
||||
}
|
||||
newApplicationStatus.ComponentsReady = fmt.Sprintf("%d/%d", countReady, len(objectStatuses))
|
||||
if errs != nil {
|
||||
setReadyUnknownCondition(newApplicationStatus, "ComponentsReadyUnknown", "failed to aggregate all components' statuses, check the Error condition for details")
|
||||
} else if aggReady {
|
||||
setReadyCondition(newApplicationStatus, "ComponentsReady", "all components ready")
|
||||
} else {
|
||||
setNotReadyCondition(newApplicationStatus, "ComponentsNotReady", fmt.Sprintf("%d components not ready", len(objectStatuses)-countReady))
|
||||
}
|
||||
|
||||
if errs != nil {
|
||||
setErrorCondition(newApplicationStatus, "ErrorSeen", errs.Error())
|
||||
} else {
|
||||
clearErrorCondition(newApplicationStatus)
|
||||
}
|
||||
|
||||
return newApplicationStatus
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
|
||||
var resources []*unstructured.Unstructured
|
||||
|
||||
if selector == nil {
|
||||
klog.V(2).Info("No selector is specified")
|
||||
return resources
|
||||
}
|
||||
|
||||
for _, gk := range groupKinds {
|
||||
mapping, err := r.Mapper.RESTMapping(schema.GroupKind{
|
||||
Group: appv1beta1.StripVersion(gk.Group),
|
||||
Kind: gk.Kind,
|
||||
})
|
||||
if err != nil {
|
||||
klog.V(2).Info("NoMappingForGK", "gk", gk.String())
|
||||
continue
|
||||
}
|
||||
|
||||
list := &unstructured.UnstructuredList{}
|
||||
list.SetGroupVersionKind(mapping.GroupVersionKind)
|
||||
if err = r.Client.List(ctx, list, client.InNamespace(namespace), client.MatchingLabels(selector.MatchLabels)); err != nil {
|
||||
klog.Error(err, "unable to list resources for GVK", "gvk", mapping.GroupVersionKind)
|
||||
*errs = append(*errs, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, u := range list.Items {
|
||||
resource := u
|
||||
resources = append(resources, &resource)
|
||||
}
|
||||
}
|
||||
return resources
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
|
||||
for _, resource := range resources {
|
||||
ownerRefs := resource.GetOwnerReferences()
|
||||
ownerRefFound := false
|
||||
for i, refs := range ownerRefs {
|
||||
if ownerRef.Kind == refs.Kind &&
|
||||
ownerRef.APIVersion == refs.APIVersion &&
|
||||
ownerRef.Name == refs.Name {
|
||||
ownerRefFound = true
|
||||
if ownerRef.UID != refs.UID {
|
||||
ownerRefs[i] = ownerRef
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !ownerRefFound {
|
||||
ownerRefs = append(ownerRefs, ownerRef)
|
||||
}
|
||||
resource.SetOwnerReferences(ownerRefs)
|
||||
err := r.Client.Update(ctx, resource)
|
||||
if err != nil {
|
||||
// We log this error, but we continue and try to set the ownerRefs on the other resources.
|
||||
klog.Error(err, "ErrorSettingOwnerRef", "gvk", resource.GroupVersionKind().String(),
|
||||
"namespace", resource.GetNamespace(), "name", resource.GetName())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
|
||||
var objectStatuses []appv1beta1.ObjectStatus
|
||||
for _, resource := range resources {
|
||||
os := appv1beta1.ObjectStatus{
|
||||
Group: resource.GroupVersionKind().Group,
|
||||
Kind: resource.GetKind(),
|
||||
Name: resource.GetName(),
|
||||
Link: resource.GetSelfLink(),
|
||||
}
|
||||
s, err := status(resource)
|
||||
if err != nil {
|
||||
klog.Error(err, "unable to compute status for resource", "gvk", resource.GroupVersionKind().String(),
|
||||
"namespace", resource.GetNamespace(), "name", resource.GetName())
|
||||
*errs = append(*errs, err)
|
||||
}
|
||||
os.Status = s
|
||||
objectStatuses = append(objectStatuses, os)
|
||||
}
|
||||
return objectStatuses
|
||||
}
|
||||
|
||||
func aggregateReady(objectStatuses []appv1beta1.ObjectStatus) (bool, int) {
|
||||
countReady := 0
|
||||
for _, os := range objectStatuses {
|
||||
if os.Status == StatusReady {
|
||||
countReady++
|
||||
}
|
||||
}
|
||||
if countReady == len(objectStatuses) {
|
||||
return true, countReady
|
||||
}
|
||||
return false, countReady
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
|
||||
if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
original := &appv1beta1.Application{}
|
||||
if err := r.Get(ctx, nn, original); err != nil {
|
||||
return err
|
||||
}
|
||||
original.Status = *status
|
||||
if err := r.Client.Status().Update(ctx, original); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to update status of Application %s/%s: %v", nn.Namespace, nn.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
c, err := ctrl.NewControllerManagedBy(mgr).
|
||||
Named("application-controller").
|
||||
For(&appv1beta1.Application{}).Build(r)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -98,46 +292,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ reconcile.Reconciler = &ReconcileApplication{}
|
||||
|
||||
// ReconcileApplication reconciles a Workspace object
|
||||
type ReconcileApplication struct {
|
||||
client.Client
|
||||
scheme *runtime.Scheme
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=app.k8s.io,resources=applications,verbs=get;list;watch;create;update;patch;delete
|
||||
func (r *ReconcileApplication) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
// Fetch the Application instance
|
||||
ctx := context.Background()
|
||||
app := &v1beta1.Application{}
|
||||
err := r.Get(ctx, request.NamespacedName, app)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Errorf("application %s not found in namespace %s", request.Name, request.Namespace)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// add specified annotation for app when triggered by sub-resources,
|
||||
// so the application in sigs.k8s.io can reconcile to update status
|
||||
annotations := app.GetObjectMeta().GetAnnotations()
|
||||
if annotations == nil {
|
||||
annotations = make(map[string]string)
|
||||
}
|
||||
annotations["kubesphere.io/last-updated"] = time.Now().String()
|
||||
app.SetAnnotations(annotations)
|
||||
err = r.Update(ctx, app)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.V(4).Infof("application %s has been deleted during update in namespace %s", request.Name, request.Namespace)
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
var _ reconcile.Reconciler = &ApplicationReconciler{}
|
||||
|
||||
func isApp(obs ...metav1.Object) bool {
|
||||
for _, o := range obs {
|
||||
|
||||
@@ -19,6 +19,10 @@ package application
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
@@ -27,71 +31,131 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
|
||||
"sigs.k8s.io/application/api/v1beta1"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
applicationName = "bookinfo"
|
||||
serviceName = "productpage"
|
||||
timeout = time.Second * 30
|
||||
interval = time.Second * 2
|
||||
)
|
||||
|
||||
var replicas = int32(2)
|
||||
var _ = Describe("Application", func() {
|
||||
|
||||
const timeout = time.Second * 30
|
||||
const interval = time.Second * 1
|
||||
|
||||
var _ = Context("Inside of a new namespace", func() {
|
||||
ctx := context.TODO()
|
||||
ns := SetupTest(ctx)
|
||||
|
||||
service := newService("productpage")
|
||||
app := newAppliation(service)
|
||||
deployments := []*v1.Deployment{newDeployments(service, "v1")}
|
||||
|
||||
BeforeEach(func() {
|
||||
|
||||
// Create application service and deployment
|
||||
Expect(k8sClient.Create(ctx, app)).Should(Succeed())
|
||||
Expect(k8sClient.Create(ctx, service)).Should(Succeed())
|
||||
for i := range deployments {
|
||||
deployment := deployments[i]
|
||||
Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())
|
||||
Describe("Application", func() {
|
||||
applicationLabels := map[string]string{
|
||||
"app.kubernetes.io/name": "bookinfo",
|
||||
"app.kubernetes.io/version": "1",
|
||||
}
|
||||
})
|
||||
|
||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
||||
// your API definition.
|
||||
// Avoid adding tests for vanilla CRUD operations because they would
|
||||
// test Kubernetes API server, which isn't the goal here.
|
||||
Context("Application Controller", func() {
|
||||
It("Should create successfully", func() {
|
||||
BeforeEach(func() {
|
||||
By("create deployment,service,application objects")
|
||||
service := newService(serviceName, ns.Name, applicationLabels)
|
||||
deployments := []*v1.Deployment{newDeployments(serviceName, ns.Name, applicationLabels, "v1")}
|
||||
app := newApplication(applicationName, ns.Name, applicationLabels)
|
||||
|
||||
By("Reconcile Application successfully")
|
||||
// application should have "kubesphere.io/last-updated" annotation
|
||||
Eventually(func() bool {
|
||||
app := &v1beta1.Application{}
|
||||
_ = k8sClient.Get(ctx, types.NamespacedName{Name: service.Labels[servicemesh.ApplicationNameLabel], Namespace: metav1.NamespaceDefault}, app)
|
||||
time, ok := app.Annotations["kubesphere.io/last-updated"]
|
||||
return len(time) > 0 && ok
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
Expect(k8sClient.Create(ctx, service.DeepCopy())).Should(Succeed())
|
||||
for i := range deployments {
|
||||
deployment := deployments[i]
|
||||
Expect(k8sClient.Create(ctx, deployment.DeepCopy())).Should(Succeed())
|
||||
}
|
||||
Expect(k8sClient.Create(ctx, app)).Should(Succeed())
|
||||
})
|
||||
|
||||
Context("Application Controller", func() {
|
||||
It("Should not reconcile application", func() {
|
||||
By("update application labels")
|
||||
application := &v1beta1.Application{}
|
||||
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
|
||||
Expect(err).Should(Succeed())
|
||||
|
||||
updateApplication := func(object interface{}) {
|
||||
newApp := object.(*v1beta1.Application)
|
||||
newApp.Labels["kubesphere.io/creator"] = ""
|
||||
}
|
||||
|
||||
updated, err := updateWithRetries(k8sClient, ctx, application.Namespace, applicationName, updateApplication, 1 * time.Second, 5 * time.Second)
|
||||
Expect(updated).Should(BeTrue())
|
||||
|
||||
Eventually(func() bool {
|
||||
|
||||
err = k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
|
||||
|
||||
// application status field should not be populated with selected deployments and services
|
||||
return len(application.Status.ComponentList.Objects) == 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
})
|
||||
|
||||
It("Should reconcile application successfully", func() {
|
||||
|
||||
By("check if application status been updated by controller")
|
||||
application := &v1beta1.Application{}
|
||||
|
||||
Eventually(func() bool {
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
|
||||
Expect(err).Should(Succeed())
|
||||
|
||||
// application status field should be populated by controller
|
||||
return len(application.Status.ComponentList.Objects) > 0
|
||||
}, timeout, interval).Should(BeTrue())
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newDeployments(service *corev1.Service, version string) *v1.Deployment {
|
||||
lbs := service.Labels
|
||||
lbs["version"] = version
|
||||
type UpdateObjectFunc func(obj interface{})
|
||||
|
||||
func updateWithRetries(client client.Client, ctx context.Context, namespace, name string, updateFunc UpdateObjectFunc, interval, timeout time.Duration)(bool, error) {
|
||||
var updateErr error
|
||||
|
||||
pollErr := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
|
||||
app := &v1beta1.Application{}
|
||||
if err = client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, app); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
updateFunc(app)
|
||||
if err = client.Update(ctx, app); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided update to object %q: %v", name, updateErr)
|
||||
return false, pollErr
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func newDeployments(deploymentName, namespace string, labels map[string]string, version string) *v1.Deployment {
|
||||
labels["app"] = deploymentName
|
||||
labels["version"] = version
|
||||
|
||||
deployment := &v1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", service.Name, version),
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Labels: lbs,
|
||||
Name: fmt.Sprintf("%s-%s", deploymentName, version),
|
||||
Namespace: namespace,
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
|
||||
},
|
||||
Spec: v1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: lbs,
|
||||
MatchLabels: labels,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: lbs,
|
||||
Annotations: service.Annotations,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
@@ -130,16 +194,14 @@ func newDeployments(service *corev1.Service, version string) *v1.Deployment {
|
||||
return deployment
|
||||
}
|
||||
|
||||
func newService(name string) *corev1.Service {
|
||||
func newService(serviceName, namesapce string, labels map[string]string) *corev1.Service {
|
||||
labels["app"] = serviceName
|
||||
|
||||
svc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "bookinfo",
|
||||
"app.kubernetes.io/version": "1",
|
||||
"app": name,
|
||||
},
|
||||
Name: serviceName,
|
||||
Namespace: namesapce,
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{
|
||||
"servicemesh.kubesphere.io/enabled": "true",
|
||||
},
|
||||
@@ -162,11 +224,7 @@ func newService(name string) *corev1.Service {
|
||||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
Selector: map[string]string{
|
||||
"app.kubernetes.io/name": "bookinfo",
|
||||
"app.kubernetes.io/version": "1",
|
||||
"app": "foo",
|
||||
},
|
||||
Selector: labels,
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
Status: corev1.ServiceStatus{},
|
||||
@@ -174,12 +232,12 @@ func newService(name string) *corev1.Service {
|
||||
return svc
|
||||
}
|
||||
|
||||
func newAppliation(service *corev1.Service) *v1beta1.Application {
|
||||
func newApplication(applicationName, namespace string, labels map[string]string) *v1beta1.Application {
|
||||
app := &v1beta1.Application{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: service.Labels[servicemesh.ApplicationNameLabel],
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
Labels: service.Labels,
|
||||
Name: applicationName,
|
||||
Namespace: namespace,
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
|
||||
},
|
||||
Spec: v1beta1.ApplicationSpec{
|
||||
@@ -193,6 +251,9 @@ func newAppliation(service *corev1.Service) *v1beta1.Application {
|
||||
Kind: "Deployment",
|
||||
},
|
||||
},
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
AddOwnerRef: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -17,12 +17,15 @@ limitations under the License.
|
||||
package application
|
||||
|
||||
import (
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"context"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -40,8 +43,8 @@ import (
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var cfg *rest.Config
|
||||
var k8sClient client.Client
|
||||
var k8sManager ctrl.Manager
|
||||
var testEnv *envtest.Environment
|
||||
|
||||
func TestApplicationController(t *testing.T) {
|
||||
@@ -55,44 +58,23 @@ var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
|
||||
sch := scheme.Scheme
|
||||
err := appv1beta1.AddToScheme(sch)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = apis.AddToScheme(sch)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
var err error
|
||||
cfg, err = testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: sch,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
err = appv1beta1.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = Add(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
klog.Error(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
@@ -100,7 +82,69 @@ var _ = BeforeSuite(func(done Done) {
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
gexec.KillAndWait(5 * time.Second)
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
// SetupTest will setup a testing environment.
|
||||
// This includes:
|
||||
// * creating a Namespace to be used during the test
|
||||
// * starting application controller
|
||||
// * stopping application controller after the test ends
|
||||
// Call this function at the start of each of your tests.
|
||||
func SetupTest(ctx context.Context) *corev1.Namespace {
|
||||
var stopCh chan struct{}
|
||||
ns := &corev1.Namespace{}
|
||||
|
||||
BeforeEach(func() {
|
||||
stopCh = make(chan struct{})
|
||||
*ns = corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, ns)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a test namespace")
|
||||
|
||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
|
||||
|
||||
selector, _ := labels.Parse("app.kubernetes.io/name,!kubesphere.io/creator")
|
||||
|
||||
reconciler := &ApplicationReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Mapper: mgr.GetRESTMapper(),
|
||||
ApplicationSelector: selector,
|
||||
}
|
||||
err = reconciler.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup application reconciler")
|
||||
|
||||
go func() {
|
||||
err = mgr.Start(stopCh)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
||||
}()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
close(stopCh)
|
||||
|
||||
err := k8sClient.Delete(ctx, ns)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
|
||||
})
|
||||
|
||||
return ns
|
||||
}
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
|
||||
|
||||
func randStringRunes(n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = letterRunes[rand.Intn(len(letterRunes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
72
pkg/controller/application/condition.go
Normal file
72
pkg/controller/application/condition.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package application
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
)
|
||||
|
||||
func setReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
|
||||
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionTrue, reason, message)
|
||||
}
|
||||
|
||||
// NotReady - shortcut to set ready condition to false
|
||||
func setNotReadyCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
|
||||
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionFalse, reason, message)
|
||||
}
|
||||
|
||||
// Unknown - shortcut to set ready condition to unknown
|
||||
func setReadyUnknownCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
|
||||
setCondition(appStatus, appv1beta1.Ready, corev1.ConditionUnknown, reason, message)
|
||||
}
|
||||
|
||||
// setErrorCondition - shortcut to set error condition
|
||||
func setErrorCondition(appStatus *appv1beta1.ApplicationStatus, reason, message string) {
|
||||
setCondition(appStatus, appv1beta1.Error, corev1.ConditionTrue, reason, message)
|
||||
}
|
||||
|
||||
// clearErrorCondition - shortcut to set error condition
|
||||
func clearErrorCondition(appStatus *appv1beta1.ApplicationStatus) {
|
||||
setCondition(appStatus, appv1beta1.Error, corev1.ConditionFalse, "NoError", "No error seen")
|
||||
}
|
||||
|
||||
func setCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
|
||||
var c *appv1beta1.Condition
|
||||
for i := range appStatus.Conditions {
|
||||
if appStatus.Conditions[i].Type == ctype {
|
||||
c = &appStatus.Conditions[i]
|
||||
}
|
||||
}
|
||||
if c == nil {
|
||||
addCondition(appStatus, ctype, status, reason, message)
|
||||
} else {
|
||||
// check message ?
|
||||
if c.Status == status && c.Reason == reason && c.Message == message {
|
||||
return
|
||||
}
|
||||
now := metav1.Now()
|
||||
c.LastUpdateTime = now
|
||||
if c.Status != status {
|
||||
c.LastTransitionTime = now
|
||||
}
|
||||
c.Status = status
|
||||
c.Reason = reason
|
||||
c.Message = message
|
||||
}
|
||||
}
|
||||
|
||||
func addCondition(appStatus *appv1beta1.ApplicationStatus, ctype appv1beta1.ConditionType, status corev1.ConditionStatus, reason, message string) {
|
||||
now := metav1.Now()
|
||||
c := appv1beta1.Condition{
|
||||
Type: ctype,
|
||||
LastUpdateTime: now,
|
||||
LastTransitionTime: now,
|
||||
Status: status,
|
||||
Reason: reason,
|
||||
Message: message,
|
||||
}
|
||||
appStatus.Conditions = append(appStatus.Conditions, c)
|
||||
}
|
||||
301
pkg/controller/application/status.go
Normal file
301
pkg/controller/application/status.go
Normal file
@@ -0,0 +1,301 @@
|
||||
// Copyright 2020 The Kubernetes Authors.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package application
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// Constants defining labels
|
||||
const (
|
||||
StatusReady = "Ready"
|
||||
StatusInProgress = "InProgress"
|
||||
StatusUnknown = "Unknown"
|
||||
StatusDisabled = "Disabled"
|
||||
)
|
||||
|
||||
func status(u *unstructured.Unstructured) (string, error) {
|
||||
gk := u.GroupVersionKind().GroupKind()
|
||||
switch gk.String() {
|
||||
case "StatefulSet.apps":
|
||||
return stsStatus(u)
|
||||
case "Deployment.apps":
|
||||
return deploymentStatus(u)
|
||||
case "ReplicaSet.apps":
|
||||
return replicasetStatus(u)
|
||||
case "DaemonSet.apps":
|
||||
return daemonsetStatus(u)
|
||||
case "PersistentVolumeClaim":
|
||||
return pvcStatus(u)
|
||||
case "Service":
|
||||
return serviceStatus(u)
|
||||
case "Pod":
|
||||
return podStatus(u)
|
||||
case "PodDisruptionBudget.policy":
|
||||
return pdbStatus(u)
|
||||
case "ReplicationController":
|
||||
return replicationControllerStatus(u)
|
||||
case "Job.batch":
|
||||
return jobStatus(u)
|
||||
default:
|
||||
return statusFromStandardConditions(u)
|
||||
}
|
||||
}
|
||||
|
||||
// Status from standard conditions
|
||||
func statusFromStandardConditions(u *unstructured.Unstructured) (string, error) {
|
||||
condition := StatusReady
|
||||
|
||||
// Check Ready condition
|
||||
_, cs, found, err := getConditionOfType(u, StatusReady)
|
||||
if err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
if found && cs == corev1.ConditionFalse {
|
||||
condition = StatusInProgress
|
||||
}
|
||||
|
||||
// Check InProgress condition
|
||||
_, cs, found, err = getConditionOfType(u, StatusInProgress)
|
||||
if err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
if found && cs == corev1.ConditionTrue {
|
||||
condition = StatusInProgress
|
||||
}
|
||||
|
||||
return condition, nil
|
||||
}
|
||||
|
||||
// Statefulset
|
||||
func stsStatus(u *unstructured.Unstructured) (string, error) {
|
||||
sts := &appsv1.StatefulSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, sts); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if sts.Status.ObservedGeneration == sts.Generation &&
|
||||
sts.Status.Replicas == *sts.Spec.Replicas &&
|
||||
sts.Status.ReadyReplicas == *sts.Spec.Replicas &&
|
||||
sts.Status.CurrentReplicas == *sts.Spec.Replicas {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Deployment
|
||||
func deploymentStatus(u *unstructured.Unstructured) (string, error) {
|
||||
deployment := &appsv1.Deployment{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, deployment); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
replicaFailure := false
|
||||
progressing := false
|
||||
available := false
|
||||
|
||||
for _, condition := range deployment.Status.Conditions {
|
||||
switch condition.Type {
|
||||
case appsv1.DeploymentProgressing:
|
||||
if condition.Status == corev1.ConditionTrue && condition.Reason == "NewReplicaSetAvailable" {
|
||||
progressing = true
|
||||
}
|
||||
case appsv1.DeploymentAvailable:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
available = true
|
||||
}
|
||||
case appsv1.DeploymentReplicaFailure:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
replicaFailure = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if deployment.Status.ObservedGeneration == deployment.Generation &&
|
||||
deployment.Status.Replicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.ReadyReplicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.AvailableReplicas == *deployment.Spec.Replicas &&
|
||||
deployment.Status.Conditions != nil && len(deployment.Status.Conditions) > 0 &&
|
||||
(progressing || available) && !replicaFailure {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Replicaset
|
||||
func replicasetStatus(u *unstructured.Unstructured) (string, error) {
|
||||
rs := &appsv1.ReplicaSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rs); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
replicaFailure := false
|
||||
for _, condition := range rs.Status.Conditions {
|
||||
switch condition.Type {
|
||||
case appsv1.ReplicaSetReplicaFailure:
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
replicaFailure = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if rs.Status.ObservedGeneration == rs.Generation &&
|
||||
rs.Status.Replicas == *rs.Spec.Replicas &&
|
||||
rs.Status.ReadyReplicas == *rs.Spec.Replicas &&
|
||||
rs.Status.AvailableReplicas == *rs.Spec.Replicas && !replicaFailure {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Daemonset
|
||||
func daemonsetStatus(u *unstructured.Unstructured) (string, error) {
|
||||
ds := &appsv1.DaemonSet{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, ds); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if ds.Status.ObservedGeneration == ds.Generation &&
|
||||
ds.Status.DesiredNumberScheduled == ds.Status.NumberAvailable &&
|
||||
ds.Status.DesiredNumberScheduled == ds.Status.NumberReady {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// PVC
|
||||
func pvcStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pvc); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if pvc.Status.Phase == corev1.ClaimBound {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Service
|
||||
func serviceStatus(u *unstructured.Unstructured) (string, error) {
|
||||
service := &corev1.Service{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, service); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
stype := service.Spec.Type
|
||||
|
||||
if stype == corev1.ServiceTypeClusterIP || stype == corev1.ServiceTypeNodePort || stype == corev1.ServiceTypeExternalName ||
|
||||
stype == corev1.ServiceTypeLoadBalancer && isEmpty(service.Spec.ClusterIP) &&
|
||||
len(service.Status.LoadBalancer.Ingress) > 0 && !hasEmptyIngressIP(service.Status.LoadBalancer.Ingress) {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// Pod
|
||||
func podStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pod := &corev1.Pod{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pod); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == corev1.PodReady && (condition.Reason == "PodCompleted" || condition.Status == corev1.ConditionTrue) {
|
||||
return StatusReady, nil
|
||||
}
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
// PodDisruptionBudget
|
||||
func pdbStatus(u *unstructured.Unstructured) (string, error) {
|
||||
pdb := &policyv1beta1.PodDisruptionBudget{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, pdb); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if pdb.Status.ObservedGeneration == pdb.Generation &&
|
||||
pdb.Status.CurrentHealthy >= pdb.Status.DesiredHealthy {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
func replicationControllerStatus(u *unstructured.Unstructured) (string, error) {
|
||||
rc := &corev1.ReplicationController{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, rc); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if rc.Status.ObservedGeneration == rc.Generation &&
|
||||
rc.Status.Replicas == *rc.Spec.Replicas &&
|
||||
rc.Status.ReadyReplicas == *rc.Spec.Replicas &&
|
||||
rc.Status.AvailableReplicas == *rc.Spec.Replicas {
|
||||
return StatusReady, nil
|
||||
}
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
func jobStatus(u *unstructured.Unstructured) (string, error) {
|
||||
job := &batchv1.Job{}
|
||||
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, job); err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
|
||||
if job.Status.StartTime == nil {
|
||||
return StatusInProgress, nil
|
||||
}
|
||||
|
||||
return StatusReady, nil
|
||||
}
|
||||
|
||||
func hasEmptyIngressIP(ingress []corev1.LoadBalancerIngress) bool {
|
||||
for _, i := range ingress {
|
||||
if isEmpty(i.IP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isEmpty reports whether s is empty or consists solely of whitespace.
func isEmpty(s string) bool {
	return strings.TrimSpace(s) == ""
}
|
||||
|
||||
func getConditionOfType(u *unstructured.Unstructured, conditionType string) (string, corev1.ConditionStatus, bool, error) {
|
||||
conditions, found, err := unstructured.NestedSlice(u.Object, "status", "conditions")
|
||||
if err != nil || !found {
|
||||
return "", corev1.ConditionFalse, false, err
|
||||
}
|
||||
|
||||
for _, c := range conditions {
|
||||
condition, ok := c.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
t, found := condition["type"]
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
condType, ok := t.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if condType == conditionType {
|
||||
reason := condition["reason"].(string)
|
||||
conditionStatus := condition["status"].(string)
|
||||
return reason, corev1.ConditionStatus(conditionStatus), true, nil
|
||||
}
|
||||
}
|
||||
return "", corev1.ConditionFalse, false, nil
|
||||
}
|
||||
Reference in New Issue
Block a user