update application controllers log level

* [application] update application controllers log level

* update

---------

Signed-off-by: wenhaozhou <wenhaozhou@yunify.com>
Signed-off-by: hongming <coder.scala@gmail.com>
hongming
2025-03-19 13:37:12 +08:00
committed by ks-ci-bot
parent 522d0b4de5
commit 0e76a4bcc5
5 changed files with 162 additions and 158 deletions
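
A note on the pattern: all five diffs below apply the same mechanical migration. Each reconciler gains a logr.Logger field initialized once in SetupWithManager, and free-floating klog calls become structured calls on a request-scoped logger. A minimal sketch of that pattern, assuming nothing beyond the APIs visible in the diffs (the exampleReconciler name and the plain-string request are illustrative, not from the commit):

package main

import (
	"errors"

	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
)

type exampleReconciler struct {
	logger logr.Logger
}

func (r *exampleReconciler) setup() {
	// One named logger per controller, created once in SetupWithManager.
	r.logger = ctrl.Log.WithName("controllers").WithName("example")
}

func (r *exampleReconciler) reconcile(req string) {
	// Request-scoped context is attached once via WithValues ...
	logger := r.logger.WithValues("app category", req)

	// ... verbosity moves from klog.V(4).Infof to logr's V(4).Info ...
	logger.V(4).Info("reconcile app category")

	// ... and errors become a value, not a %v inside the format string.
	logger.Error(errors.New("category is in use"),
		"cannot delete helm category which still owns applications")
}

func main() {
	r := &exampleReconciler{}
	r.setup()
	r.reconcile("uncategorized")
}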

View File

@@ -9,13 +9,16 @@ import (
"context"
"strings"
"github.com/go-logr/logr"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
erro "errors"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -36,6 +39,7 @@ var _ kscontroller.Controller = &AppCategoryReconciler{}
type AppCategoryReconciler struct {
client.Client
logger logr.Logger
}
func (r *AppCategoryReconciler) Name() string {
@@ -48,6 +52,7 @@ func (r *AppCategoryReconciler) Enabled(clusterRole string) bool {
func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(categoryController)
return ctrl.NewControllerManagedBy(mgr).
Named(categoryController).
For(&appv2.Category{}).
@@ -69,8 +74,8 @@ func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) erro
}
func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(4).Info("reconcile", "app category", req.String())
r.logger.V(4).Info("reconcile app category", "app category", req.String())
logger := r.logger.WithValues("app category", req.String())
category := &appv2.Category{}
if err := r.Client.Get(ctx, req.NamespacedName, category); err != nil {
if errors.IsNotFound(err) {
@@ -78,7 +83,7 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return reconcile.Result{}, r.ensureUncategorizedCategory()
}
// ignore exceptions caused by incorrectly adding app labels.
klog.Errorf("not found %s, check if you added the correct app category", req.String())
logger.Error(err, "not found, check if you added the correct app category")
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
@@ -93,7 +98,7 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
// our finalizer is present, so lets handle our external dependency
// remove our finalizer from the list and update it.
if category.Status.Total > 0 {
klog.Errorf("can not delete helm category: %s which owns applications", req.String())
logger.Error(erro.New("category is in use"), "cannot delete helm category which still owns applications")
return reconcile.Result{}, nil
}
@@ -107,13 +112,13 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
appv2.RepoIDLabelKey: appv2.UploadRepoKey,
}
if err := r.List(ctx, apps, opts); err != nil {
klog.Errorf("failed to list apps: %v", err)
r.logger.Error(err, "failed to list apps")
return ctrl.Result{}, err
}
if category.Status.Total != len(apps.Items) {
category.Status.Total = len(apps.Items)
if err := r.Status().Update(ctx, category); err != nil {
klog.Errorf("failed to update category status: %v", err)
r.logger.Error(err, "failed to update category status")
return ctrl.Result{}, err
}
}
@@ -125,7 +130,7 @@ func (r *AppCategoryReconciler) ensureUncategorizedCategory() error {
ctg := &appv2.Category{}
err := r.Get(context.TODO(), types.NamespacedName{Name: appv2.UncategorizedCategoryID}, ctg)
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("failed to get uncategorized category: %v", err)
r.logger.Error(err, "failed to get uncategorized category")
return err
}
ctg.Name = appv2.UncategorizedCategoryID

View File

@@ -13,40 +13,32 @@ import (
"strings"
"time"
"k8s.io/client-go/rest"
batchv1 "k8s.io/api/batch/v1"
"kubesphere.io/utils/helm"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"kubesphere.io/api/constants"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
"github.com/go-logr/logr"
helmrelease "helm.sh/helm/v3/pkg/release"
batchv1 "k8s.io/api/batch/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/kubesphere/pkg/simple/client/application"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/constants"
"kubesphere.io/utils/helm"
"kubesphere.io/utils/s3"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
"kubesphere.io/kubesphere/pkg/simple/client/application"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)
const (
@@ -76,9 +68,10 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
r.Client = mgr.GetClient()
clusterClientSet, err := clusterclient.NewClusterClientSet(mgr.GetCache())
if err != nil {
return fmt.Errorf("failed to create cluster client set: %v", err)
return fmt.Errorf("failed to create cluster client set")
}
r.clusterClientSet = clusterClientSet
r.logger = ctrl.Log.WithName("controllers").WithName(helminstallerController)
if r.HelmExecutorOptions == nil || r.HelmExecutorOptions.Image == "" {
return fmt.Errorf("helm executor options is nil or image is empty")
@@ -86,7 +79,7 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}
@@ -105,11 +98,11 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
func (r *AppReleaseReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
cluster := o.(*clusterv1alpha1.Cluster)
klog.Infof("cluster %s has been deleted", cluster.Name)
r.logger.V(4).Info("cluster has been deleted", "cluster", cluster)
apprlsList := &appv2.ApplicationReleaseList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.ClusterNameLabelKey: cluster.Name})}
if err := r.List(ctx, apprlsList, opts); err != nil {
klog.Errorf("failed to list application releases: %v", err)
r.logger.Error(err, "failed to list application releases")
return requests
}
for _, apprls := range apprlsList.Items {
@@ -124,15 +117,15 @@ type AppReleaseReconciler struct {
HelmExecutorOptions *kscontroller.HelmExecutorOptions
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}
func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
apprls := &appv2.ApplicationRelease{}
if err := r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
timeoutRecheck := apprls.Annotations[appv2.TimeoutRecheck]
var reCheck int
if timeoutRecheck == "" {
@@ -143,12 +136,12 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
dstKubeConfig, runClient, err := r.getClusterInfo(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster info: %v", err)
logger.Error(err, "failed to get cluster info")
return ctrl.Result{}, err
}
executor, err := r.getExecutor(apprls, dstKubeConfig, runClient)
if err != nil {
klog.Errorf("failed to get executor: %v", err)
logger.Error(err, "failed to get executor")
return ctrl.Result{}, err
}
@@ -160,18 +153,18 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
helmKubeConfig, err := application.GetHelmKubeConfig(ctx, cluster, runClient)
if err != nil {
klog.Errorf("failed to get helm kubeconfig: %v", err)
logger.Error(err, "failed to get helm kubeconfig")
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) || (err == nil && !cluster.DeletionTimestamp.IsZero()) {
klog.Errorf("cluster not found or deleting %s: %v", apprls.GetRlsCluster(), err)
logger.Error(err, "cluster not found or deleting", "cluster", apprls.GetRlsCluster())
apprls.Status.State = appv2.StatusClusterDeleted
apprls.Status.Message = fmt.Sprintf("cluster %s has been deleted", cluster.Name)
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -180,7 +173,7 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if !controllerutil.ContainsFinalizer(apprls, HelmReleaseFinalizer) && apprls.ObjectMeta.DeletionTimestamp.IsZero() {
expected := apprls.DeepCopy()
controllerutil.AddFinalizer(expected, HelmReleaseFinalizer)
klog.Infof("add finalizer for apprelease %s", apprls.Name)
logger.V(6).Info("add finalizer for application release")
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(apprls))
}
@@ -194,14 +187,14 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
wait, err := r.cleanJob(ctx, apprls, runClient)
if err != nil {
klog.Errorf("failed to clean job: %v", err)
logger.Error(err, "failed to clean job")
return ctrl.Result{}, err
}
if wait {
klog.Infof("job wait, job for %s is still active", apprls.Name)
logger.V(6).Info("job wait, job is still active")
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
klog.Infof("job for %s has been cleaned", apprls.Name)
logger.V(4).Info("job has been cleaned")
if err = r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
@@ -209,10 +202,10 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
apprls.Finalizers = nil
err = r.Update(ctx, apprls)
if err != nil {
klog.Errorf("failed to remove finalizer for apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to remove finalizer for application release")
return ctrl.Result{}, err
}
klog.Infof("remove finalizer for apprelease %s", apprls.Name)
logger.V(6).Info("remove finalizer for application release")
return ctrl.Result{}, nil
}
@@ -262,10 +255,10 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if apprls.Status.State != appv2.StatusTimeout {
err = r.updateStatus(ctx, apprls, appv2.StatusTimeout, "Installation timeout")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
logger.Error(err, "failed to update application release status")
return ctrl.Result{}, err
}
klog.Infof("Installation timeout, will check status again after %d second", timeoutVerificationAgain)
logger.V(2).Info("installation timeout, will check status again after seconds", "timeout verification again", timeoutVerificationAgain)
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}
@@ -278,15 +271,15 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
patch, _ := json.Marshal(apprls)
err = r.Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
klog.Infof("update recheck times %s for %s", strconv.Itoa(reCheck+1), apprls.Name)
logger.V(2).Info("update recheck times", "recheck times", strconv.Itoa(reCheck+1))
if deployed {
err = r.updateStatus(ctx, apprls, appv2.StatusActive, "StatusActive")
if err != nil {
klog.Errorf("failed to update apprelease %s %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -299,7 +292,7 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
err = r.updateStatus(ctx, apprls, appv2.StatusActive, release.Info.Description)
return ctrl.Result{}, err
default:
klog.V(5).Infof("helm release %s/%s status %s, check again after %d second", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain)
logger.V(5).Info(fmt.Sprintf("helm release %s/%s status %s, check again after %d seconds", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain))
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
}
@@ -313,12 +306,13 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client, release *helmrelease.Release) (ct ctrl.Result, todo bool, err error) {
klog.Infof("helm release %s/%s ready to create or upgrade yet,check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName)
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
logger.V(4).Info("helm release %s/%s ready to create or upgrade yet,check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName)
job := &batchv1.Job{}
if err := runClient.Get(ctx, types.NamespacedName{Namespace: apprls.GetRlsNamespace(), Name: apprls.Status.InstallJobName}, job); err != nil {
if apierrors.IsNotFound(err) {
klog.Errorf("job %s not found", apprls.Status.InstallJobName)
logger.Error(err, "job not found", "install job", apprls.Status.InstallJobName)
msg := "deploy failed, job not found"
return ctrl.Result{}, false, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
@@ -329,7 +323,7 @@ func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.Appli
return ctrl.Result{}, false, r.updateStatus(ctx, apprls, appv2.StatusActive, "Upgrade succeeful")
}
if job.Status.Failed > 0 {
klog.Infof("install apprls %s job %s , failed times %d/%d", apprls.Name, job.Name, job.Status.Failed, *job.Spec.BackoffLimit+1)
logger.V(2).Info(fmt.Sprintf("install job failed, failed times %d/%d", job.Status.Failed, *job.Spec.BackoffLimit+1), "job", job.Name)
}
if job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit {
// When in the upgrade state, if job execution fails while the HelmRelease status remains deployed, directly mark the AppRelease as StatusDeployFailed.
@@ -344,45 +338,47 @@ func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.Appli
}
func (r *AppReleaseReconciler) removeAll(ctx context.Context, apprls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (ct ctrl.Result, err error) {
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
err = r.updateStatus(ctx, apprls, appv2.StatusDeleting, "Uninstalling")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
logger.Error(err, "failed to update application release status")
return ctrl.Result{}, err
}
uninstallJobName, err := r.uninstall(ctx, apprls, executor, kubeconfig)
if err != nil {
klog.Errorf("failed to uninstall helm release %s: %v", apprls.Name, err)
logger.Error(err, "failed to uninstall application release")
return ctrl.Result{}, err
}
err = r.cleanStore(ctx, apprls)
if err != nil {
klog.Errorf("failed to clean store: %v", err)
logger.Error(err, "failed to clean store")
return ctrl.Result{}, err
}
klog.Infof("remove apprelease %s success", apprls.Name)
logger.V(4).Info("remove application release success")
if uninstallJobName != "" {
klog.Infof("try to update uninstall apprls job name %s to apprelease %s", uninstallJobName, apprls.Name)
logger.V(4).Info("try to update application release uninstall job", "job", uninstallJobName)
apprls.Status.UninstallJobName = uninstallJobName
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
klog.Infof("update uninstall apprls job name %s to apprelease %s success", uninstallJobName, apprls.Name)
logger.V(4).Info("update application release uninstall job success", "job", uninstallJobName)
}
return ctrl.Result{}, nil
}
func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprls *appv2.ApplicationRelease) (*dynamic.DynamicClient, error) {
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
clusterClient, err := r.clusterClientSet.GetClusterClient(clusterName)
if err != nil {
klog.Errorf("failed to get cluster client: %v", err)
logger.Error(err, "failed to get cluster client", "cluster", clusterName)
return nil, err
}
creator := apprls.Annotations[constants.CreatorAnnotationKey]
@@ -392,7 +388,7 @@ func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprl
UserName: creator,
}
}
klog.Infof("DynamicClient impersonate kubeAsUser: %s", creator)
logger.V(4).Info("DynamicClient impersonate kubeAsUser", "creator", creator)
dynamicClient, err := dynamic.NewForConfig(&conf)
return dynamicClient, err
}

View File

@@ -9,9 +9,10 @@ import (
"context"
"strings"
"github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"k8s.io/klog/v2"
"kubesphere.io/utils/s3"
"kubesphere.io/kubesphere/pkg/simple/client/application"
@@ -37,6 +38,7 @@ type AppVersionReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}
func (r *AppVersionReconciler) Name() string {
@@ -49,9 +51,10 @@ func (r *AppVersionReconciler) Enabled(clusterRole string) bool {
func (r *AppVersionReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(appVersionController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}
return ctrl.NewControllerManagedBy(mgr).
@@ -66,6 +69,7 @@ func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if err := r.Client.Get(ctx, req.NamespacedName, appVersion); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
logger := r.logger.WithValues("application version", appVersion.Name)
if !controllerutil.ContainsFinalizer(appVersion, appv2.CleanupFinalizer) {
controllerutil.RemoveFinalizer(appVersion, appv2.StoreCleanFinalizer)
controllerutil.AddFinalizer(appVersion, appv2.CleanupFinalizer)
@@ -76,7 +80,7 @@ func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if !appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
err := r.deleteFile(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to clean file for appversion %s: %v", appVersion.Name, err)
logger.Error(err, "Failed to clean file")
}
}
@@ -84,32 +88,33 @@ func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
func (r *AppVersionReconciler) deleteFile(ctx context.Context, appVersion *appv2.ApplicationVersion) error {
logger := r.logger.WithValues("application version", appVersion.Name)
defer func() {
controllerutil.RemoveFinalizer(appVersion, appv2.CleanupFinalizer)
err := r.Update(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to remove finalizer from appversion %s: %v", appVersion.Name, err)
logger.Error(err, "Failed to remove finalizer from application version")
}
klog.Infof("Remove finalizer from appversion %s successfully", appVersion.Name)
logger.V(4).Info("Remove finalizer from application version %s successfully")
}()
klog.Infof("ApplicationVersion %s has been deleted, try to clean file", appVersion.Name)
logger.V(4).Info("ApplicationVersion has been deleted, try to clean file")
id := []string{appVersion.Name}
apprls := &appv2.ApplicationReleaseList{}
err := r.Client.List(ctx, apprls, client.MatchingLabels{appv2.AppVersionIDLabelKey: appVersion.Name})
if err != nil {
klog.Errorf("Failed to list ApplicationRelease: %v", err)
logger.Error(err, "Failed to list ApplicationRelease")
return err
}
if len(apprls.Items) > 0 {
klog.Infof("ApplicationVersion %s is still in use, keep file in store", appVersion.Name)
logger.V(4).Info("ApplicationVersion is still in use, keep file in store")
return nil
}
err = application.FailOverDelete(r.cmStore, r.ossStore, id)
if err != nil {
klog.Errorf("Fail to delete appversion %s from store: %v", appVersion.Name, err)
logger.Error(err, "Fail to delete application version from store")
return err
}
klog.Infof("Delete file %s from store successfully", appVersion.Name)
logger.V(4).Info("Delete file from store successfully")
return nil
}

View File

@@ -12,7 +12,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/utils/helm"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -22,21 +21,21 @@ import (
)
func (r *AppReleaseReconciler) uninstall(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (jobName string, err error) {
klog.Infof("uninstall helm release %s", rls.Name)
logger := r.logger.WithValues("application release", rls.Name).WithValues("namespace", rls.Namespace)
logger.V(4).Info("uninstall helm release")
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
logger.V(4).Info("helm impersonate kubeAsUser", "creator", creator)
options := []helm.HelmOption{
helm.SetNamespace(rls.GetRlsNamespace()),
helm.SetKubeconfig(kubeconfig),
}
if jobName, err = executor.Uninstall(ctx, rls.Name, options...); err != nil {
klog.Error(err, "failed to force delete helm release")
logger.Error(err, "failed to force delete helm release")
return jobName, err
}
klog.Infof("uninstall helm release %s success,job name: %s", rls.Name, jobName)
logger.Info("uninstall helm release success", "job", jobName)
return jobName, nil
}
@@ -49,12 +48,13 @@ func (r *AppReleaseReconciler) jobStatus(job *batchv1.Job) (active, completed, f
}
func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) error {
logger := r.logger.WithValues("application release", rls.Name).WithValues("namespace", rls.Namespace)
clusterName := rls.GetRlsCluster()
namespace := rls.GetRlsNamespace()
klog.Infof("begin to create or upgrade %s app release %s in cluster %s ns: %s", rls.Spec.AppType, rls.Name, clusterName, namespace)
logger.V(6).Info("begin to create or upgrade app release", "cluster", clusterName)
creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
logger.V(6).Info("helm impersonate kubeAsUser", "creator", creator)
options := []helm.HelmOption{
helm.SetInstall(true),
helm.SetNamespace(namespace),
@@ -66,23 +66,23 @@ func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rl
if rls.Spec.AppType == appv2.AppTypeHelm {
_, err := executor.Get(ctx, rls.Name, options...)
if err != nil && err.Error() == "release: not found" {
klog.Infof("release %s not found, begin to create", rls.Name)
logger.V(4).Info("release not found, begin to create")
}
if err == nil {
klog.Infof("release %s found, begin to upgrade", rls.Name)
logger.V(6).Info("release found, begin to upgrade")
state = appv2.StatusUpgraded
}
}
data, err := application.FailOverGet(r.cmStore, r.ossStore, rls.Spec.AppVersionID, r.Client, true)
if err != nil {
klog.Errorf("failed to get app version data, err: %v", err)
logger.Error(err, "failed to get app version data")
return err
}
options = append(options, helm.SetChartData(data))
if rls.Status.InstallJobName, err = executor.Upgrade(ctx, rls.Name, "", rls.Spec.Values, options...); err != nil {
klog.Errorf("failed to create executor job, err: %v", err)
logger.Error(err, "failed to create executor job")
return r.updateStatus(ctx, rls, appv2.StatusFailed, err.Error())
}
@@ -99,22 +99,23 @@ func (r *AppReleaseReconciler) getExecutor(apprls *appv2.ApplicationRelease, kub
}
func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls *appv2.ApplicationRelease) (executor helm.Executor, err error) {
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
dynamicClient, err := r.getClusterDynamicClient(apprls.GetRlsCluster(), apprls)
if err != nil {
klog.Errorf("failed to get dynamic client: %v", err)
logger.Error(err, "failed to get dynamic client")
return nil, err
}
jsonList, err := application.ReadYaml(apprls.Spec.Values)
if err != nil {
klog.Errorf("failed to read yaml: %v", err)
logger.Error(err, "failed to read yaml")
return nil, err
}
var gvrListInfo []application.InsInfo
for _, i := range jsonList {
gvr, utd, err := application.GetInfoFromBytes(i, runClient.RESTMapper())
if err != nil {
klog.Errorf("failed to get info from bytes: %v", err)
logger.Error(err, "failed to get info from bytes")
return nil, err
}
ins := application.InsInfo{
@@ -134,6 +135,7 @@ func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls
}
func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease, kubeconfig []byte) (executor helm.Executor, err error) {
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
executorOptions := []helm.ExecutorOption{
helm.SetExecutorKubeConfig(kubeconfig),
helm.SetExecutorNamespace(apprls.GetRlsNamespace()),
@@ -148,7 +150,7 @@ func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease,
executor, err = helm.NewExecutor(executorOptions...)
if err != nil {
klog.Errorf("failed to create helm executor: %v", err)
logger.Error(err, "failed to create helm executor")
return nil, err
}
@@ -156,25 +158,25 @@ func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease,
}
func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client) (wait bool, err error) {
logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
jobs := &batchv1.JobList{}
opts := []client.ListOption{client.InNamespace(apprls.GetRlsNamespace()), client.MatchingLabels{appv2.AppReleaseReferenceLabelKey: apprls.Name}}
err = runClient.List(ctx, jobs, opts...)
if err != nil {
klog.Errorf("failed to list job for %s: %v", apprls.Name, err)
logger.Error(err, "failed to list job")
return false, err
}
if len(jobs.Items) == 0 {
klog.Infof("cluster: %s namespace: %s no job found for %s", apprls.GetRlsCluster(), apprls.GetRlsNamespace(), apprls.Name)
logger.V(6).Info("no job found", "cluster", apprls.GetRlsCluster())
return false, nil
}
klog.Infof("found %d jobs for %s", len(jobs.Items), apprls.Name)
logger.V(6).Info("found jobs", "job number", len(jobs.Items))
for _, job := range jobs.Items {
klog.Infof("begin to clean job %s/%s", job.Namespace, job.Name)
logger.V(6).Info("begin to clean job", "namespace", job.Namespace, "job", job.Name)
jobActive, jobCompleted, failed := r.jobStatus(&job)
if jobActive {
klog.Infof("job %s is still active", job.Name)
logger.V(6).Info("job is still active", "job", job.Name)
return true, nil
}
if jobCompleted || failed {
@@ -182,18 +184,18 @@ func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.Appli
opt := client.DeleteOptions{PropagationPolicy: &deletePolicy}
err = runClient.Delete(ctx, &job, &opt)
if err != nil {
klog.Errorf("failed to delete job %s: %v", job.Name, err)
logger.Error(err, "failed to delete job", "job", job.Name)
return false, err
}
klog.Infof("job %s has been deleted", job.Name)
logger.V(4).Info("job has been deleted", "job", job.Name)
} else {
klog.Infof("job:%s status unknown, wait for next reconcile: %v", job.Name, job.Status)
logger.V(4).Info("job status unknown, wait for next reconcile", "job", job.Name, "status", job.Status)
return true, nil
}
}
klog.Infof("all job has been deleted")
logger.Info("all job has been deleted")
return false, nil
}
@@ -202,13 +204,13 @@ func (r *AppReleaseReconciler) cleanStore(ctx context.Context, apprls *appv2.App
appVersion := &appv2.ApplicationVersion{}
err = r.Get(ctx, client.ObjectKey{Name: name}, appVersion)
if apierrors.IsNotFound(err) {
klog.Infof("appVersion %s has been deleted, cleanup file in oss", name)
r.logger.Info("application version has been deleted, cleanup file in oss", "application version", name)
err = application.FailOverDelete(r.cmStore, r.ossStore, []string{name})
if err != nil {
klog.Warningf("failed to cleanup file in oss: %v", err)
r.logger.Error(err, "failed to cleanup file in oss")
return nil
}
return nil
}
klog.Infof("appVersion %s still exists, no need to cleanup file in oss", name)
r.logger.V(6).Info("application version still exists, no need to cleanup file in oss", "application version", name)
return nil
}

View File

@@ -13,36 +13,31 @@ import (
"strings"
"time"
"k8s.io/utils/ptr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"kubesphere.io/api/constants"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"
"kubesphere.io/utils/s3"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"github.com/go-logr/logr"
helmrepo "helm.sh/helm/v3/pkg/repo"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
appv2 "kubesphere.io/api/application/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/constants"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"kubesphere.io/utils/s3"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)
const helmRepoController = "helmrepo"
const helmRepoController = "helmrepo-controller"
var _ reconcile.Reconciler = &RepoReconciler{}
var _ kscontroller.Controller = &RepoReconciler{}
@@ -52,6 +47,7 @@ type RepoReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}
func (r *RepoReconciler) Name() string {
@@ -65,11 +61,11 @@ func (r *RepoReconciler) Enabled(clusterRole string) bool {
func (r *RepoReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
workspace := o.(*tenantv1beta1.WorkspaceTemplate)
klog.Infof("workspace %s has been deleted", workspace.Name)
r.logger.V(4).Info("workspace has been deleted", "workspace", workspace.Name)
repoList := &appv2.RepoList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: workspace.Name})}
if err := r.List(ctx, repoList, opts); err != nil {
klog.Errorf("failed to list repo: %v", err)
r.logger.Error(err, "failed to list repo")
return requests
}
for _, repo := range repoList.Items {
@@ -81,10 +77,10 @@ func (r *RepoReconciler) mapper(ctx context.Context, o client.Object) (requests
func (r *RepoReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.recorder = mgr.GetEventRecorderFor(helmRepoController)
r.logger = ctrl.Log.WithName("controllers").WithName(helmRepoController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}
@@ -104,30 +100,32 @@ func (r *RepoReconciler) UpdateStatus(ctx context.Context, helmRepo *appv2.Repo)
newRepo.Name = helmRepo.Name
newRepo.Status.State = helmRepo.Status.State
newRepo.Status.LastUpdateTime = metav1.Now()
logger := r.logger.WithValues("repo", helmRepo.Name)
patch, _ := json.Marshal(newRepo)
err := r.Status().Patch(ctx, newRepo, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return err
}
klog.Infof("update repo %s status: %s", helmRepo.GetName(), helmRepo.Status.State)
logger.V(4).Info("update repo status", "status", helmRepo.Status.State)
return nil
}
func (r *RepoReconciler) skipSync(helmRepo *appv2.Repo) (bool, error) {
logger := r.logger.WithValues("repo", helmRepo.Name)
if helmRepo.Status.State == appv2.StatusManualTrigger || helmRepo.Status.State == appv2.StatusSyncing {
klog.Infof("repo: %s state: %s", helmRepo.GetName(), helmRepo.Status.State)
logger.V(4).Info(fmt.Sprintf("repo state: %s", helmRepo.Status.State))
return false, nil
}
if helmRepo.Spec.SyncPeriod == nil || *helmRepo.Spec.SyncPeriod == 0 {
klog.Infof("repo: %s no sync SyncPeriod=0", helmRepo.GetName())
logger.V(4).Info("repo no sync SyncPeriod=0")
return true, nil
}
passed := time.Since(helmRepo.Status.LastUpdateTime.Time).Seconds()
if helmRepo.Status.State == appv2.StatusSuccessful && passed < float64(*helmRepo.Spec.SyncPeriod) {
klog.Infof("last sync time is %s, passed %f, no need to sync, repo: %s", helmRepo.Status.LastUpdateTime, passed, helmRepo.GetName())
logger.V(4).Info(fmt.Sprintf("last sync time is %s, passed %f, no need to sync", helmRepo.Status.LastUpdateTime, passed))
return true, nil
}
return false, nil
@@ -155,10 +153,10 @@ func filterVersions(versions []*helmrepo.ChartVersion) []*helmrepo.ChartVersion
}
func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
logger := r.logger.WithValues("repo", request.Name)
helmRepo := &appv2.Repo{}
if err := r.Client.Get(ctx, request.NamespacedName, helmRepo); err != nil {
klog.Errorf("get helm repo failed, error: %s", err)
logger.Error(err, "get helm repo failed")
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if helmRepo.Status.State == "" {
@@ -178,10 +176,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
if workspaceName != "" {
err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, workspaceTemplate)
if apierrors.IsNotFound(err) || (err == nil && !workspaceTemplate.DeletionTimestamp.IsZero()) {
klog.Infof("workspace not found or deleting %s %s", workspaceName, err)
logger.V(4).Error(err, "workspace not found or deleting", "workspace", workspaceName)
err = r.Delete(ctx, helmRepo)
if err != nil {
klog.Errorf("delete helm repo failed, error: %s", err)
logger.Error(err, "delete helm repo failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -201,13 +199,13 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return reconcile.Result{}, err
}
index, err := application.LoadRepoIndex(helmRepo.Spec.Url, helmRepo.Spec.Credential)
if err != nil {
klog.Errorf("load index failed, repo: %s, url: %s, err: %s", helmRepo.GetName(), helmRepo.Spec.Url, err)
logger.Error(err, "load index failed", "url", helmRepo.Spec.Url)
return reconcile.Result{}, err
}
@@ -217,7 +215,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
}
err = r.Client.List(ctx, appList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
logger.Error(err, "list application failed")
return reconcile.Result{}, err
}
indexMap := make(map[string]struct{})
@@ -228,10 +226,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
}
for _, i := range appList.Items {
if _, exists := indexMap[i.Name]; !exists {
klog.Infof("app %s has been removed from the repo", i.Name)
logger.V(4).Info("application has been removed from the repo", "application", i.Name)
err = r.Client.Delete(ctx, &i)
if err != nil {
klog.Errorf("delete app %s failed, error: %s", i.Name, err)
logger.Error(err, "delete application failed", "application", i.Name)
return reconcile.Result{}, err
}
}
@@ -239,25 +237,22 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
for appName, versions := range index.Entries {
if len(versions) == 0 {
klog.Infof("no version found for %s", appName)
logger.V(4).Info("no version found for application", "application", appName)
continue
}
versions = filterVersions(versions)
if len(versions) > appv2.MaxNumOfVersions {
versions = versions[:appv2.MaxNumOfVersions]
}
vRequests, err := repoParseRequest(r.Client, versions, helmRepo, appName, appList)
vRequests, err := r.repoParseRequest(ctx, versions, helmRepo, appName, appList)
if err != nil {
klog.Errorf("parse request failed, error: %s", err)
logger.Error(err, "parse request failed")
return reconcile.Result{}, err
}
if len(vRequests) == 0 {
continue
}
klog.Infof("found %d/%d versions for %s need to upgrade or create", len(vRequests), len(versions), appName)
logger.V(6).Info(fmt.Sprintf("found %d/%d versions for application %s need to upgrade or create", len(vRequests), len(versions), appName))
own := metav1.OwnerReference{
APIVersion: appv2.SchemeGroupVersion.String(),
@@ -266,7 +261,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
UID: helmRepo.UID,
}
if err = application.CreateOrUpdateApp(r.Client, vRequests, r.cmStore, r.ossStore, own); err != nil {
klog.Errorf("create or update app failed, error: %s", err)
logger.Error(err, "create or update app failed")
return reconcile.Result{}, err
}
}
@@ -274,7 +269,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
helmRepo.Status.State = appv2.StatusSuccessful
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return reconcile.Result{}, err
}
@@ -283,9 +278,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}
func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string, appList *appv2.ApplicationList) (createOrUpdateList []application.AppRequest, err error) {
func (r *RepoReconciler) repoParseRequest(ctx context.Context, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string, appList *appv2.ApplicationList) (createOrUpdateList []application.AppRequest, err error) {
appVersionList := &appv2.ApplicationVersionList{}
logger := r.logger.WithValues("repo", helmRepo.Name)
appID := fmt.Sprintf("%s-%s", helmRepo.Name, application.GenerateShortNameMD5Hash(appName))
opts := client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
@@ -293,9 +289,9 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
appv2.AppIDLabelKey: appID,
}),
}
err = cli.List(context.Background(), appVersionList, &opts)
err = r.Client.List(ctx, appVersionList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
logger.Error(err, "list application version failed")
return nil, err
}
@@ -309,14 +305,14 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
}
for _, i := range appVersionList.Items {
legalVersion := application.FormatVersion(i.Spec.VersionName)
key := fmt.Sprintf("%s-%s", i.GetLabels()[appv2.AppIDLabelKey], legalVersion)
LegalVersion := application.FormatVersion(i.Spec.VersionName)
key := fmt.Sprintf("%s-%s", i.GetLabels()[appv2.AppIDLabelKey], LegalVersion)
_, exists := versionMap[key]
if !exists {
klog.Infof("delete appversion %s", i.GetName())
err = cli.Delete(context.Background(), &i)
logger.V(4).Info("delete application version", "application version", i.GetName())
err = r.Client.Delete(ctx, &i)
if err != nil {
klog.Errorf("delete appversion failed, error: %s", err)
logger.Error(err, "delete application version failed")
return nil, err
}
} else {
@@ -333,7 +329,7 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
continue
}
if dig != "" {
klog.Infof("digest not match, key: %s, digest: %s, ver.Digest: %s", key, dig, ver.Digest)
logger.V(4).Info(fmt.Sprintf("digest not match, key: %s, digest: %s, ver.Digest: %s", key, dig, ver.Digest))
}
vRequest := generateVRequest(helmRepo, ver, shortName, appName)
createOrUpdateList = append(createOrUpdateList, vRequest)
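
Finally, a practical note on the V(n) levels used throughout: V-level messages are dropped unless the process runs with matching verbosity. Assuming the binary uses controller-runtime's stock zap wiring (that wiring is not part of this diff, so treat the flag names as upstream defaults rather than KubeSphere specifics), visibility is controlled like this:

package main

import (
	"flag"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Hypothetical main wiring, showing only how V-levels become visible.
	opts := zap.Options{}
	opts.BindFlags(flag.CommandLine) // registers --zap-log-level, --zap-devel, ...
	flag.Parse()

	// With --zap-log-level=4, every logger.V(4).Info(...) call in the diffs
	// above is emitted; V(6) lines need --zap-log-level=6, and so on.
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
}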