openpitrix crd
Signed-off-by: LiHui <andrewli@yunify.com> delete helm repo, release and app Signed-off-by: LiHui <andrewli@yunify.com> Fix Dockerfile Signed-off-by: LiHui <andrewli@yunify.com> add unit test for category controller Signed-off-by: LiHui <andrewli@yunify.com> resource api Signed-off-by: LiHui <andrewli@yunify.com> miscellaneous Signed-off-by: LiHui <andrewli@yunify.com> resource api Signed-off-by: LiHui <andrewli@yunify.com> add s3 repo indx Signed-off-by: LiHui <andrewli@yunify.com> attachment api Signed-off-by: LiHui <andrewli@yunify.com> repo controller test Signed-off-by: LiHui <andrewli@yunify.com> application controller test Signed-off-by: LiHui <andrewli@yunify.com> release metric Signed-off-by: LiHui <andrewli@yunify.com> helm release controller test Signed-off-by: LiHui <andrewli@yunify.com> move constants to /pkg/apis/application Signed-off-by: LiHui <andrewli@yunify.com> remove unused code Signed-off-by: LiHui <andrewli@yunify.com> add license header Signed-off-by: LiHui <andrewli@yunify.com> Fix bugs Signed-off-by: LiHui <andrewli@yunify.com> cluster cluent Signed-off-by: LiHui <andrewli@yunify.com> format code Signed-off-by: LiHui <andrewli@yunify.com> move workspace,cluster from spec to labels Signed-off-by: LiHui <andrewli@yunify.com> add license header Signed-off-by: LiHui <andrewli@yunify.com> openpitrix test Signed-off-by: LiHui <andrewli@yunify.com> add worksapce labels for app in appstore Signed-off-by: LiHui <andrewli@yunify.com>
This commit is contained in:
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmapplication
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// init registers this package's Prometheus metrics once at package load
// time (metric definitions live in metrics.go).
func init() {
	registerMetrics()
}

const (
	// helmApplicationControllerName identifies this controller.
	helmApplicationControllerName = "helm-application-controller"
)

// Compile-time assertion that ReconcileHelmApplication satisfies
// reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileHelmApplication{}

// ReconcileHelmApplication reconciles a federated helm application object
type ReconcileHelmApplication struct {
	client.Client
}

const (
	// appFinalizer blocks HelmApplication deletion until the copy in the
	// app store has been cleaned up (see Reconcile).
	appFinalizer = "helmapplication.application.kubesphere.io"
)
|
||||
|
||||
func (r *ReconcileHelmApplication) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
klog.V(4).Info("sync helm application")
|
||||
|
||||
rootCtx := context.Background()
|
||||
app := &v1alpha1.HelmApplication{}
|
||||
err := r.Client.Get(rootCtx, request.NamespacedName, app)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if app.DeletionTimestamp == nil {
|
||||
// new app, update finalizer
|
||||
if !sliceutil.HasString(app.ObjectMeta.Finalizers, appFinalizer) {
|
||||
app.ObjectMeta.Finalizers = append(app.ObjectMeta.Finalizers, appFinalizer)
|
||||
if err := r.Update(rootCtx, app); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// create app success
|
||||
appOperationTotal.WithLabelValues("creation", app.GetTrueName(), strconv.FormatBool(inAppStore(app))).Inc()
|
||||
}
|
||||
|
||||
if !inAppStore(app) {
|
||||
if app.Status.State == v1alpha1.StateActive ||
|
||||
app.Status.State == v1alpha1.StateSuspended {
|
||||
return reconcile.Result{}, r.createAppCopyInAppStore(rootCtx, app)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// delete app copy in appStore
|
||||
if !inAppStore(app) {
|
||||
if err := r.deleteAppCopyInAppStore(rootCtx, app.Name); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
app.ObjectMeta.Finalizers = sliceutil.RemoveString(app.ObjectMeta.Finalizers, func(item string) bool {
|
||||
return item == appFinalizer
|
||||
})
|
||||
klog.V(4).Info("update app")
|
||||
if err := r.Update(rootCtx, app); err != nil {
|
||||
klog.Errorf("update app failed, error: %s", err)
|
||||
return ctrl.Result{}, err
|
||||
} else {
|
||||
// delete app success
|
||||
appOperationTotal.WithLabelValues("deletion", app.GetTrueName(), strconv.FormatBool(inAppStore(app))).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmApplication) deleteAppCopyInAppStore(ctx context.Context, name string) error {
|
||||
appInStore := &v1alpha1.HelmApplication{}
|
||||
err := r.Client.Get(ctx, types.NamespacedName{Name: fmt.Sprintf("%s%s", name, v1alpha1.HelmApplicationAppStoreSuffix)}, appInStore)
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = r.Delete(ctx, appInStore)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a application copy in app store
|
||||
func (r *ReconcileHelmApplication) createAppCopyInAppStore(ctx context.Context, from *v1alpha1.HelmApplication) error {
|
||||
name := fmt.Sprintf("%s%s", from.Name, v1alpha1.HelmApplicationAppStoreSuffix)
|
||||
|
||||
app := &v1alpha1.HelmApplication{}
|
||||
err := r.Get(ctx, types.NamespacedName{Name: name}, app)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if app.Name == "" {
|
||||
app.Name = name
|
||||
labels := from.Labels
|
||||
if len(labels) == 0 {
|
||||
labels = make(map[string]string, 3)
|
||||
}
|
||||
labels[constants.ChartRepoIdLabelKey] = v1alpha1.AppStoreRepoId
|
||||
|
||||
// assign a category to app
|
||||
if labels[constants.CategoryIdLabelKey] == "" {
|
||||
labels[constants.CategoryIdLabelKey] = v1alpha1.UncategorizedId
|
||||
}
|
||||
labels[v1alpha1.OriginWorkspaceLabelKey] = from.GetWorkspace()
|
||||
|
||||
// apps in store are global resource.
|
||||
delete(labels, constants.WorkspaceLabelKey)
|
||||
app.Labels = labels
|
||||
|
||||
app.Spec = *from.Spec.DeepCopy()
|
||||
|
||||
err = r.Create(context.TODO(), app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if app.Status.State == "" {
|
||||
// update status if needed
|
||||
return updateHelmApplicationStatus(r.Client, from.Name, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmApplication) SetupWithManager(mgr ctrl.Manager) error {
|
||||
r.Client = mgr.GetClient()
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1alpha1.HelmApplication{}).Complete(r)
|
||||
}
|
||||
|
||||
func inAppStore(app *v1alpha1.HelmApplication) bool {
|
||||
return strings.HasSuffix(app.Name, v1alpha1.HelmApplicationAppStoreSuffix)
|
||||
}
|
||||
@@ -0,0 +1,283 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmapplication
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/Masterminds/semver/v3"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// HelmAppVersionFinalizer blocks HelmApplicationVersion deletion until
	// the owning applications' statuses have been refreshed.
	HelmAppVersionFinalizer = "helmappversion.application.kubesphere.io"
)

// Compile-time assertion that ReconcileHelmApplicationVersion satisfies
// reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileHelmApplicationVersion{}

// ReconcileHelmApplicationVersion reconciles a helm application version object
type ReconcileHelmApplicationVersion struct {
	client.Client
}
|
||||
|
||||
// Reconcile reads that state of the cluster for a helmapplicationversions object and makes changes based on the state read
|
||||
// and what is in the helmapplicationversions.Spec
|
||||
func (r *ReconcileHelmApplicationVersion) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
start := time.Now()
|
||||
klog.V(4).Infof("sync helm application version: %s", request.String())
|
||||
defer func() {
|
||||
klog.V(4).Infof("sync helm application version end: %s, elapsed: %v", request.String(), time.Now().Sub(start))
|
||||
}()
|
||||
|
||||
appVersion := &v1alpha1.HelmApplicationVersion{}
|
||||
err := r.Client.Get(context.TODO(), request.NamespacedName, appVersion)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
|
||||
if appVersion.Status.State == "" {
|
||||
// set status to draft
|
||||
return reconcile.Result{}, r.updateStatus(appVersion)
|
||||
}
|
||||
|
||||
if !sliceutil.HasString(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer) {
|
||||
appVersion.ObjectMeta.Finalizers = append(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer)
|
||||
if err := r.Update(context.Background(), appVersion); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
} else {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(appVersion.ObjectMeta.Finalizers, HelmAppVersionFinalizer) {
|
||||
// update related helm application
|
||||
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), false)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// Delete HelmApplicationVersion
|
||||
appVersion.ObjectMeta.Finalizers = sliceutil.RemoveString(appVersion.ObjectMeta.Finalizers, func(item string) bool {
|
||||
if item == HelmAppVersionFinalizer {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err := r.Update(context.Background(), appVersion); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// update related helm application
|
||||
err = updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), false)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if appVersion.Status.State == v1alpha1.StateActive {
|
||||
// add labels to helm application version
|
||||
// The label will exists forever, since this helmapplicationversion's state only can be active and suspend.
|
||||
if appVersion.GetHelmRepoId() == "" {
|
||||
instanceCopy := appVersion.DeepCopy()
|
||||
instanceCopy.Labels[constants.ChartRepoIdLabelKey] = v1alpha1.AppStoreRepoId
|
||||
patch := client.MergeFrom(appVersion)
|
||||
err = r.Client.Patch(context.TODO(), instanceCopy, patch)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
app := v1alpha1.HelmApplication{}
|
||||
err = r.Get(context.TODO(), types.NamespacedName{Name: appVersion.GetHelmApplicationId()}, &app)
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
|
||||
} else if appVersion.Status.State == v1alpha1.StateSuspended {
|
||||
return reconcile.Result{}, updateHelmApplicationStatus(r.Client, appVersion.GetHelmApplicationId(), true)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// updateHelmApplicationStatus recomputes the aggregate status (state,
// latest version, timestamps) of the helm application identified by appId.
// When inAppStore is true the app-store copy ("<appId>-store") is updated
// instead of the original. A missing application is treated as success.
func updateHelmApplicationStatus(c client.Client, appId string, inAppStore bool) error {
	app := v1alpha1.HelmApplication{}

	var err error
	if inAppStore {
		// application name ends with `-store`
		err = c.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s%s", appId, v1alpha1.HelmApplicationAppStoreSuffix)}, &app)
	} else {
		err = c.Get(context.TODO(), types.NamespacedName{Name: appId}, &app)
	}

	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil
		}
		return err
	}

	// Skip applications that are being deleted.
	if !app.DeletionTimestamp.IsZero() {
		return nil
	}

	// Collect every version belonging to this application.
	var versions v1alpha1.HelmApplicationVersionList
	err = c.List(context.TODO(), &versions, client.MatchingLabels{
		constants.ChartApplicationIdLabelKey: appId,
	})

	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	latestVersionName := getLatestVersionName(versions, inAppStore)
	state := mergeApplicationVersionState(versions)

	now := time.Now()
	if state != app.Status.State {
		// update StatusTime when state changed
		app.Status.StatusTime = &metav1.Time{Time: now}
	}

	// Only write the status when something actually changed.
	if state != app.Status.State || latestVersionName != app.Status.LatestVersion {
		app.Status.State = state
		app.Status.LatestVersion = latestVersionName
		app.Status.UpdateTime = &metav1.Time{Time: now}
		err := c.Status().Update(context.TODO(), &app)
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
func (r *ReconcileHelmApplicationVersion) updateStatus(appVersion *v1alpha1.HelmApplicationVersion) error {
|
||||
appVersion.Status = v1alpha1.HelmApplicationVersionStatus{
|
||||
State: v1alpha1.StateDraft,
|
||||
Audit: []v1alpha1.Audit{
|
||||
{
|
||||
State: v1alpha1.StateDraft,
|
||||
Time: appVersion.CreationTimestamp,
|
||||
Operator: appVersion.GetCreator(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := r.Status().Update(context.TODO(), appVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLatestVersionName(versions v1alpha1.HelmApplicationVersionList, inAppStore bool) string {
|
||||
l := versions.Items
|
||||
if len(l) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
verInd := 0
|
||||
if inAppStore {
|
||||
// only check active app version
|
||||
for ; verInd < len(l); verInd++ {
|
||||
if l[verInd].Status.State == v1alpha1.StateActive {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if verInd == len(l) {
|
||||
return ""
|
||||
}
|
||||
|
||||
latestSemver, _ := semver.NewVersion(l[verInd].GetSemver())
|
||||
|
||||
for i := verInd + 1; i < len(l); i++ {
|
||||
curr, _ := semver.NewVersion(l[i].GetSemver())
|
||||
if inAppStore {
|
||||
if l[i].Status.State != v1alpha1.StateActive {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if latestSemver.LessThan(curr) {
|
||||
verInd = i
|
||||
}
|
||||
}
|
||||
|
||||
return l[verInd].GetVersionName()
|
||||
}
|
||||
|
||||
// mergeApplicationVersionState derives the aggregate state of a helm
// application from the states of its versions: any active version makes the
// app active; otherwise all-draft means draft; otherwise any suspended
// version means suspended; the fallback is draft.
func mergeApplicationVersionState(versions v1alpha1.HelmApplicationVersionList) string {
	// Count versions per state, ignoring versions already being deleted.
	states := make(map[string]int, len(versions.Items))

	for _, version := range versions.Items {
		if version.DeletionTimestamp == nil {
			state := version.Status.State
			states[state] = states[state] + 1
		}
	}

	// If there is one active appVersion, the helm application is active
	if states[v1alpha1.StateActive] > 0 {
		return v1alpha1.StateActive
	}

	// All appVersion is draft, the helm application is draft
	// NOTE(review): the comparison uses len(versions.Items), which includes
	// versions being deleted even though they were excluded from the counts
	// above, so this branch cannot trigger while any version is terminating
	// — confirm this is intended.
	if states[v1alpha1.StateDraft] == len(versions.Items) {
		return v1alpha1.StateDraft
	}

	if states[v1alpha1.StateSuspended] > 0 {
		return v1alpha1.StateSuspended
	}

	// default state is draft
	return v1alpha1.StateDraft
}
|
||||
|
||||
func (r *ReconcileHelmApplicationVersion) SetupWithManager(mgr ctrl.Manager) error {
|
||||
r.Client = mgr.GetClient()
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&v1alpha1.HelmApplicationVersion{}).
|
||||
Complete(r)
|
||||
}
|
||||
44
pkg/controller/openpitrix/helmapplication/metrics.go
Normal file
44
pkg/controller/openpitrix/helmapplication/metrics.go
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmapplication
|
||||
|
||||
import (
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"kubesphere.io/kubesphere/pkg/utils/metrics"
|
||||
)
|
||||
|
||||
var (
	// appOperationTotal counts helm application creations and deletions,
	// labelled by verb ("creation"/"deletion"), the application's display
	// name, and whether the app lives in the app store.
	appOperationTotal = compbasemetrics.NewCounterVec(
		&compbasemetrics.CounterOpts{
			Subsystem:      "ks_cm",
			Name:           "helm_application_operation_total",
			Help:           "Counter of app creation and deletion",
			StabilityLevel: compbasemetrics.ALPHA,
		},
		[]string{"verb", "name", "appstore"},
	)

	// metricsList holds every metric this package registers.
	metricsList = []compbasemetrics.Registerable{
		appOperationTotal,
	}
)

// registerMetrics registers all package metrics with the shared registry;
// it is invoked once from init() in the application controller.
func registerMetrics() {
	for _, m := range metricsList {
		metrics.MustRegister(m)
	}
}
|
||||
@@ -0,0 +1,337 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmcategory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// HelmCategoryFinalizer blocks category deletion while the category
	// still owns applications.
	HelmCategoryFinalizer = "helmcategories.application.kubesphere.io"
)

// Add creates a helm-category controller and registers it with mgr.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileHelmCategory{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}
}
|
||||
|
||||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
|
||||
func add(mgr manager.Manager, r reconcile.Reconciler) error {
|
||||
// Create a new controller
|
||||
c, err := controller.New("helm-category-controller", mgr, controller.Options{Reconciler: r})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes to HelmCategory
|
||||
err = c.Watch(&source.Kind{Type: &v1alpha1.HelmCategory{}}, &handler.EnqueueRequestForObject{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reconcileObj := r.(*ReconcileHelmCategory)
|
||||
// Watch for changes to HelmApplication
|
||||
err = c.Watch(&source.Kind{Type: &v1alpha1.HelmApplication{}}, &handler.Funcs{
|
||||
CreateFunc: func(event event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
app := event.Object.(*v1alpha1.HelmApplication)
|
||||
err := reconcileObj.updateUncategorizedApplicationLabels(app)
|
||||
if err != nil {
|
||||
limitingInterface.AddAfter(event, 20*time.Second)
|
||||
return
|
||||
}
|
||||
|
||||
repoId := app.GetHelmRepoId()
|
||||
if repoId == v1alpha1.AppStoreRepoId {
|
||||
ctgId := app.GetHelmCategoryId()
|
||||
if ctgId == "" {
|
||||
ctgId = v1alpha1.UncategorizedId
|
||||
}
|
||||
err := reconcileObj.updateCategoryCount(ctgId)
|
||||
if err != nil {
|
||||
klog.Errorf("reconcile category %s failed, error: %s", ctgId, err)
|
||||
}
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
oldApp := updateEvent.ObjectOld.(*v1alpha1.HelmApplication)
|
||||
newApp := updateEvent.ObjectNew.(*v1alpha1.HelmApplication)
|
||||
err := reconcileObj.updateUncategorizedApplicationLabels(newApp)
|
||||
if err != nil {
|
||||
limitingInterface.AddAfter(updateEvent, 20*time.Second)
|
||||
return
|
||||
}
|
||||
var oldId string
|
||||
repoId := newApp.GetHelmRepoId()
|
||||
if repoId == v1alpha1.AppStoreRepoId {
|
||||
oldId = oldApp.GetHelmCategoryId()
|
||||
if oldId == "" {
|
||||
oldId = v1alpha1.UncategorizedId
|
||||
}
|
||||
err := reconcileObj.updateCategoryCount(oldId)
|
||||
if err != nil {
|
||||
klog.Errorf("reconcile category %s failed, error: %s", oldId, err)
|
||||
}
|
||||
}
|
||||
|
||||
// new labels and new repo id
|
||||
repoId = newApp.GetHelmRepoId()
|
||||
if repoId == v1alpha1.AppStoreRepoId {
|
||||
// new category id
|
||||
newId := newApp.GetHelmCategoryId()
|
||||
if newId == "" {
|
||||
newId = v1alpha1.UncategorizedId
|
||||
}
|
||||
if oldId != newId {
|
||||
err := reconcileObj.updateCategoryCount(newId)
|
||||
if err != nil {
|
||||
klog.Errorf("reconcile category %s failed, error: %s", newId, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
|
||||
app := deleteEvent.Object.(*v1alpha1.HelmApplication)
|
||||
repoId := app.GetHelmRepoId()
|
||||
if repoId == v1alpha1.AppStoreRepoId {
|
||||
id := app.GetHelmCategoryId()
|
||||
if id == "" {
|
||||
id = v1alpha1.UncategorizedId
|
||||
}
|
||||
err := reconcileObj.updateCategoryCount(id)
|
||||
if err != nil {
|
||||
klog.Errorf("reconcile category %s failed, error: %s", id, err)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
// create Uncategorized object
|
||||
ticker := time.NewTicker(15 * time.Second)
|
||||
for range ticker.C {
|
||||
ctg := &v1alpha1.HelmCategory{}
|
||||
err := reconcileObj.Get(context.TODO(), types.NamespacedName{Name: v1alpha1.UncategorizedId}, ctg)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Errorf("get helm category: %s failed, error: %s", v1alpha1.UncategorizedId, err)
|
||||
}
|
||||
if ctg.Name != "" {
|
||||
// category exists now
|
||||
return
|
||||
}
|
||||
|
||||
ctg = &v1alpha1.HelmCategory{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: v1alpha1.UncategorizedId,
|
||||
},
|
||||
Spec: v1alpha1.HelmCategorySpec{
|
||||
Description: v1alpha1.UncategorizedName,
|
||||
Name: v1alpha1.UncategorizedName,
|
||||
},
|
||||
}
|
||||
err = reconcileObj.Create(context.TODO(), ctg)
|
||||
if err != nil {
|
||||
klog.Errorf("create helm category: %s failed, error: %s", v1alpha1.UncategorizedName, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compile-time assertion that ReconcileHelmCategory satisfies
// reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileHelmCategory{}

// ReconcileHelmCategory reconciles a HelmCategory object.
type ReconcileHelmCategory struct {
	client.Client
	Scheme   *runtime.Scheme
	recorder record.EventRecorder
	config   *rest.Config
}
|
||||
|
||||
// Reconcile reads that state of the cluster for a helmcategories object and makes changes based on the state read
|
||||
// and what is in the helmreleases.Spec
|
||||
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmcategories/status,verbs=get;update;patch
|
||||
func (r *ReconcileHelmCategory) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
||||
start := time.Now()
|
||||
klog.V(4).Infof("sync helm category: %s", request.String())
|
||||
defer func() {
|
||||
klog.V(4).Infof("sync helm category end: %s, elapsed: %v", request.String(), time.Now().Sub(start))
|
||||
}()
|
||||
|
||||
instance := &v1alpha1.HelmCategory{}
|
||||
err := r.Client.Get(context.TODO(), request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
if request.Name == v1alpha1.UncategorizedId {
|
||||
err = r.ensureUncategorizedCategory()
|
||||
// If create uncategorized category failed, we need create it again
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
// Object not found, return. Created objects are automatically garbage collected.
|
||||
// For additional cleanup logic use finalizers.
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
// Error reading the object - requeue the request.
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
// The object is not being deleted, so if it does not have our finalizer,
|
||||
// then lets add the finalizer and update the object.
|
||||
if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer) {
|
||||
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer)
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmCategoryFinalizer) {
|
||||
// our finalizer is present, so lets handle our external dependency
|
||||
// remove our finalizer from the list and update it.
|
||||
|
||||
if instance.Status.Total > 0 {
|
||||
klog.Errorf("can not delete helm category: %s which owns applications", request.String())
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
|
||||
if item == HelmCategoryFinalizer {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err := r.Update(context.Background(), instance); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
err = r.updateCategoryCount(instance.Name)
|
||||
if err != nil {
|
||||
klog.Errorf("update helm category: %s status failed, error: %s", instance.Name, err)
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmCategory) ensureUncategorizedCategory() error {
|
||||
ctg := &v1alpha1.HelmCategory{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Name: v1alpha1.UncategorizedId}, ctg)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
ctg.Name = v1alpha1.UncategorizedId
|
||||
ctg.Spec.Name = v1alpha1.UncategorizedName
|
||||
ctg.Spec.Description = v1alpha1.UncategorizedName
|
||||
err = r.Create(context.TODO(), ctg)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmCategory) updateCategoryCount(id string) error {
|
||||
ctg := &v1alpha1.HelmCategory{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Name: id}, ctg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
count, err := r.countApplications(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctg.Status.Total == count {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctg.Status.Total = count
|
||||
|
||||
err = r.Status().Update(context.TODO(), ctg)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmCategory) countApplications(id string) (int, error) {
|
||||
list := v1alpha1.HelmApplicationList{}
|
||||
var err error
|
||||
err = r.List(context.TODO(), &list, client.MatchingLabels{
|
||||
constants.CategoryIdLabelKey: id,
|
||||
constants.ChartRepoIdLabelKey: v1alpha1.AppStoreRepoId,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
count := 0
|
||||
// just count active helm application
|
||||
for _, app := range list.Items {
|
||||
if app.Status.State == v1alpha1.StateActive {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// add category id to helm application
|
||||
func (r *ReconcileHelmCategory) updateUncategorizedApplicationLabels(app *v1alpha1.HelmApplication) error {
|
||||
if app == nil {
|
||||
return nil
|
||||
}
|
||||
if app.GetHelmRepoId() == v1alpha1.AppStoreRepoId && app.GetHelmCategoryId() == "" {
|
||||
appCopy := app.DeepCopy()
|
||||
appCopy.Labels[constants.CategoryIdLabelKey] = v1alpha1.UncategorizedId
|
||||
patch := client.MergeFrom(app)
|
||||
err := r.Client.Patch(context.TODO(), appCopy, patch)
|
||||
if err != nil {
|
||||
klog.Errorf("patch application: %s failed, error: %s", app.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,107 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmcategory
|
||||
|
||||
import (
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"kubesphere.io/kubesphere/pkg/controller/openpitrix/helmapplication"
|
||||
"os"
|
||||
"path/filepath"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
// Shared suite state, initialized in BeforeSuite and torn down in AfterSuite.
var k8sClient client.Client       // client backed by the test manager's cache
var k8sManager ctrl.Manager       // controller manager running the controllers under test
var testEnv *envtest.Environment  // envtest control plane (or existing cluster)
|
||||
|
||||
// TestHelmCategoryController is the `go test` entry point that hands control
// to the Ginkgo specs registered in this package.
func TestHelmCategoryController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t,
		"HelmCategory Controller Test Suite",
		[]Reporter{printer.NewlineReporter{}})
}
|
||||
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
MetricsBindAddress: "0",
|
||||
Scheme: scheme.Scheme,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = Add(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&helmapplication.ReconcileHelmApplication{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = (&helmapplication.ReconcileHelmApplicationVersion{}).SetupWithManager(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
// AfterSuite kills any gexec-managed processes, then stops the envtest
// control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
|
||||
@@ -0,0 +1,132 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmcategory
|
||||
|
||||
import (
|
||||
"context"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
"time"
|
||||
)
|
||||
|
||||
// End-to-end spec for the HelmCategory controller: it creates an app-store
// application, an application version, and a category, then verifies that the
// controller creates the built-in "uncategorized" category, that the app
// becomes active once its version is active, and that the uncategorized
// category's total is updated to count the active app.
var _ = Describe("helmCategory", func() {

	const timeout = time.Second * 240
	const interval = time.Second * 1

	app := createApp()
	appVer := createAppVersion(app.GetHelmApplicationId())
	ctg := createCtg()

	BeforeEach(func() {
		// Seed the cluster with the fixtures the controller reconciles over.
		err := k8sClient.Create(context.Background(), app)
		Expect(err).NotTo(HaveOccurred())

		err = k8sClient.Create(context.Background(), appVer)
		Expect(err).NotTo(HaveOccurred())

		err = k8sClient.Create(context.Background(), ctg)
		Expect(err).NotTo(HaveOccurred())
	})

	Context("Helm category Controller", func() {
		It("Should success", func() {
			key := types.NamespacedName{
				Name: v1alpha1.UncategorizedId,
			}

			// The controller is expected to create the built-in
			// "uncategorized" category on its own.
			By("Expecting category should exists")
			Eventually(func() bool {
				f := &v1alpha1.HelmCategory{}
				k8sClient.Get(context.Background(), key, f)
				return !f.CreationTimestamp.IsZero()
			}, timeout, interval).Should(BeTrue())

			// Drive the app version to active; retried because the update can
			// conflict with concurrent controller writes.
			By("Update helm app version status")
			Eventually(func() bool {
				k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer.Name}, appVer)
				appVer.Status = v1alpha1.HelmApplicationVersionStatus{
					State: v1alpha1.StateActive,
				}
				err := k8sClient.Status().Update(context.Background(), appVer)
				return err == nil
			}, timeout, interval).Should(BeTrue())

			By("Wait for app status become active")
			Eventually(func() bool {
				appKey := types.NamespacedName{
					Name: app.Name,
				}
				k8sClient.Get(context.Background(), appKey, app)
				return app.State() == v1alpha1.StateActive
			}, timeout, interval).Should(BeTrue())

			// The active app has no category label, so the controller should
			// count it under "uncategorized".
			By("Reconcile for `uncategorized` category")
			Eventually(func() bool {
				key := types.NamespacedName{Name: v1alpha1.UncategorizedId}
				ctg := v1alpha1.HelmCategory{}
				k8sClient.Get(context.Background(), key, &ctg)

				return ctg.Status.Total == 1
			}, timeout, interval).Should(BeTrue())
		})
	})
})
|
||||
|
||||
func createCtg() *v1alpha1.HelmCategory {
|
||||
return &v1alpha1.HelmCategory{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmCategoryIdPrefix),
|
||||
},
|
||||
Spec: v1alpha1.HelmCategorySpec{
|
||||
Name: "dummy-ctg",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createApp() *v1alpha1.HelmApplication {
|
||||
return &v1alpha1.HelmApplication{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix),
|
||||
},
|
||||
Spec: v1alpha1.HelmApplicationSpec{
|
||||
Name: "dummy-chart",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createAppVersion(appId string) *v1alpha1.HelmApplicationVersion {
|
||||
return &v1alpha1.HelmApplicationVersion{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
|
||||
Labels: map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: appId,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.HelmApplicationVersionSpec{
|
||||
Metadata: &v1alpha1.Metadata{
|
||||
Version: "0.0.1",
|
||||
Name: "dummy-chart",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
472
pkg/controller/openpitrix/helmrelease/helm_release_controller.go
Normal file
472
pkg/controller/openpitrix/helmrelease/helm_release_controller.go
Normal file
@@ -0,0 +1,472 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmrelease
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3"
|
||||
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
"math"
|
||||
"path"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// HelmReleaseFinalizer blocks deletion of a HelmRelease until the helm
	// release has been uninstalled from its target cluster.
	HelmReleaseFinalizer = "helmrelease.application.kubesphere.io"
	// IndexerName is the informer index keyed by "<cluster>/<namespace>".
	IndexerName = "clusterNamespace"
)

// Sentinel errors returned while resolving chart data for a release; callers
// can compare with errors.Is.
var (
	ErrGetRepoFailed              = errors.New("get repo failed")
	ErrGetAppFailed               = errors.New("get app failed")
	ErrAppVersionDataIsEmpty      = errors.New("app version data is empty")
	ErrGetAppVersionFailed        = errors.New("get app version failed")
	ErrLoadChartFailed            = errors.New("load chart failed")
	ErrLoadChartFromStorageFailed = errors.New("load chart from storage failed")
)

var _ reconcile.Reconciler = &ReconcileHelmRelease{}

// ReconcileHelmRelease reconciles a HelmRelease object: it resolves the
// chart data, installs/upgrades/uninstalls the release via the helm wrapper,
// and maintains the release status.
type ReconcileHelmRelease struct {
	StorageClient s3.Interface
	KsFactory     externalversions.SharedInformerFactory
	clusterClients clusterclient.ClusterClients
	client.Client
	recorder record.EventRecorder
	// mock helm install && uninstall
	helmMock bool
	informer cache.SharedIndexInformer
}
|
||||
|
||||
// State machine of a HelmRelease:
//
//                <==>upgrading===================
//               |                                \
// creating===>active=====>deleting=>deleted      |
//        \       ^      /                        |
//         \      |     /======>                 /
//          \=>failed<==========================
//
// Reconcile reads that state of the cluster for a helmreleases object and makes changes based on the state read
// and what is in the helmreleases.Spec
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmreleases/status,verbs=get;update;patch
func (r *ReconcileHelmRelease) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the helmReleases instance
	instance := &v1alpha1.HelmRelease{}
	err := r.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if apierrors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}

	// New object: record the initial "creating" state and let the status
	// update trigger the next reconcile.
	if instance.Status.State == "" {
		instance.Status.State = v1alpha1.HelmStatusCreating
		instance.Status.LastUpdate = metav1.Now()
		err = r.Status().Update(context.TODO(), instance)
		return reconcile.Result{}, err
	}

	if instance.ObjectMeta.DeletionTimestamp.IsZero() {
		// The object is not being deleted, so if it does not have our finalizer,
		// then lets add the finalizer and update the object.
		if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {
			instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer)
			// add owner References
			if err := r.Update(context.Background(), instance); err != nil {
				return reconcile.Result{}, err
			}
			return reconcile.Result{}, nil
		}
	} else {
		// The object is being deleted: uninstall the helm release first, then
		// drop our finalizer so Kubernetes can remove the object.
		if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmReleaseFinalizer) {

			klog.V(3).Infof("helm uninstall %s/%s from host cluster", instance.GetRlsNamespace(), instance.Spec.Name)
			err := r.uninstallHelmRelease(instance)
			if err != nil {
				return reconcile.Result{}, err
			}

			klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
			// remove finalizer
			instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
				if item == HelmReleaseFinalizer {
					return true
				}
				return false
			})
			if err := r.Update(context.Background(), instance); err != nil {
				return reconcile.Result{}, err
			}
		}
		return reconcile.Result{}, nil
	}

	// Normal path: run the install/upgrade state machine.
	return r.reconcile(instance)
}
|
||||
|
||||
func (r *ReconcileHelmRelease) GetChartData(rls *v1alpha1.HelmRelease) (chartName string, chartData []byte, err error) {
|
||||
if rls.Spec.RepoId != "" && rls.Spec.RepoId != v1alpha1.AppStoreRepoId {
|
||||
// load chart data from helm repo
|
||||
repo := v1alpha1.HelmRepo{}
|
||||
err := r.Get(context.TODO(), types.NamespacedName{Name: rls.Spec.RepoId}, &repo)
|
||||
if err != nil {
|
||||
klog.Errorf("get helm repo %s failed, error: %v", rls.Spec.RepoId, err)
|
||||
return chartName, chartData, ErrGetRepoFailed
|
||||
}
|
||||
|
||||
index, err := helmrepoindex.ByteArrayToSavedIndex([]byte(repo.Status.Data))
|
||||
|
||||
if version := index.GetApplicationVersion(rls.Spec.ApplicationId, rls.Spec.ApplicationVersionId); version != nil {
|
||||
url := version.Spec.URLs[0]
|
||||
if !(strings.HasPrefix(url, "https://") || strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "s3://")) {
|
||||
url = repo.Spec.Url + "/" + url
|
||||
}
|
||||
buf, err := helmrepoindex.LoadChart(context.TODO(), url, &repo.Spec.Credential)
|
||||
if err != nil {
|
||||
klog.Infof("load chart failed, error: %s", err)
|
||||
return chartName, chartData, ErrLoadChartFailed
|
||||
}
|
||||
chartData = buf.Bytes()
|
||||
chartName = version.Name
|
||||
} else {
|
||||
klog.Errorf("get app version: %s failed", rls.Spec.ApplicationVersionId)
|
||||
return chartName, chartData, ErrGetAppVersionFailed
|
||||
}
|
||||
} else {
|
||||
// load chart data from helm application version
|
||||
appVersion := &v1alpha1.HelmApplicationVersion{}
|
||||
err = r.Get(context.TODO(), types.NamespacedName{Name: rls.Spec.ApplicationVersionId}, appVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version %s failed, error: %v", rls.Spec.ApplicationVersionId, err)
|
||||
return chartName, chartData, ErrGetAppVersionFailed
|
||||
}
|
||||
|
||||
chartData, err = r.StorageClient.Read(path.Join(appVersion.GetWorkspace(), appVersion.Name))
|
||||
if err != nil {
|
||||
klog.Errorf("load chart from storage failed, error: %s", err)
|
||||
return chartName, chartData, ErrLoadChartFromStorageFailed
|
||||
}
|
||||
|
||||
chartName = appVersion.GetTrueName()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// reconcile drives a non-deleting HelmRelease through its state machine:
// creating/failed -> install, upgrading -> upgrade, active with a changed
// spec version -> mark upgrading. After each deploy attempt it prepends an
// entry to the deploy history (capped at 10) and persists the status.
func (r *ReconcileHelmRelease) reconcile(instance *v1alpha1.HelmRelease) (reconcile.Result, error) {

	// Up to date: nothing to deploy, just recheck periodically.
	if instance.Status.State == v1alpha1.HelmStatusActive && instance.Status.Version == instance.Spec.Version {
		// check release status
		return reconcile.Result{
			// recheck release status after 10 minutes
			RequeueAfter: 10 * time.Minute,
		}, nil
	}

	// Back off retries of failed deploys based on how many failures are in
	// the deploy history.
	ft := failedTimes(instance.Status.DeployStatus)
	if v1alpha1.HelmStatusFailed == instance.Status.State && ft > 0 {
		// exponential backoff, max delay 180s
		retryAfter := time.Duration(math.Min(math.Exp2(float64(ft)), 180)) * time.Second
		var lastDeploy time.Time

		if instance.Status.LastDeployed != nil {
			lastDeploy = instance.Status.LastDeployed.Time
		} else {
			lastDeploy = instance.Status.LastUpdate.Time
		}
		if time.Now().Before(lastDeploy.Add(retryAfter)) {
			return reconcile.Result{RequeueAfter: retryAfter}, nil
		}
	}

	var err error
	switch instance.Status.State {
	case v1alpha1.HelmStatusDeleting:
		// no operation
		return reconcile.Result{}, nil
	case v1alpha1.HelmStatusActive:
		// Spec version differs from deployed version: flag as upgrading and
		// let the next reconcile perform the upgrade.
		instance.Status.State = v1alpha1.HelmStatusUpgrading
		err = r.Status().Update(context.TODO(), instance)
		return reconcile.Result{}, err
	case v1alpha1.HelmStatusCreating:
		// create new release
		err = r.createOrUpgradeHelmRelease(instance, false)
	case v1alpha1.HelmStatusFailed:
		// check failed times
		err = r.createOrUpgradeHelmRelease(instance, false)
	case v1alpha1.HelmStatusUpgrading:
		err = r.createOrUpgradeHelmRelease(instance, true)
	case v1alpha1.HelmStatusRollbacking:
		// TODO: rollback helm release
	}

	// Record the outcome of the deploy attempt in status and history.
	now := metav1.Now()
	var deployStatus v1alpha1.HelmReleaseDeployStatus
	if err != nil {
		instance.Status.State = v1alpha1.HelmStatusFailed
		instance.Status.Message = stringutils.ShortenString(err.Error(), v1alpha1.MsgLen)
		deployStatus.Message = instance.Status.Message
		deployStatus.State = v1alpha1.HelmStatusFailed
	} else {
		instance.Status.State = v1alpha1.StateActive
		instance.Status.Message = ""
		instance.Status.Version = instance.Spec.Version
		deployStatus.State = v1alpha1.HelmStatusSuccessful
	}

	deployStatus.Time = now
	instance.Status.LastUpdate = now
	instance.Status.LastDeployed = &now
	// Newest deploy record first; keep at most 10 entries (the three-index
	// slice also drops the excess capacity).
	if len(instance.Status.DeployStatus) > 0 {
		instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus}, instance.Status.DeployStatus...)
		if len(instance.Status.DeployStatus) >= 10 {
			instance.Status.DeployStatus = instance.Status.DeployStatus[:10:10]
		}
	} else {
		instance.Status.DeployStatus = append([]v1alpha1.HelmReleaseDeployStatus{deployStatus})
	}

	err = r.Status().Update(context.TODO(), instance)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}
|
||||
|
||||
func failedTimes(status []v1alpha1.HelmReleaseDeployStatus) int {
|
||||
count := 0
|
||||
for i := range status {
|
||||
if status[i].State == v1alpha1.HelmStatusFailed {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmRelease) createOrUpgradeHelmRelease(rls *v1alpha1.HelmRelease, upgrade bool) error {
|
||||
var chartData []byte
|
||||
var err error
|
||||
_, chartData, err = r.GetChartData(rls)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(chartData) == 0 {
|
||||
klog.Errorf("empty chart data failed, release name %s, chart name: %s", rls.Name, rls.Spec.ChartName)
|
||||
return ErrAppVersionDataIsEmpty
|
||||
}
|
||||
|
||||
clusterName := rls.GetRlsCluster()
|
||||
|
||||
var clusterConfig string
|
||||
if clusterName != "" && r.KsFactory != nil {
|
||||
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster %s config failed", clusterConfig)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If clusterConfig is empty, this application will be installed in current host.
|
||||
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
|
||||
var res helmwrapper.HelmRes
|
||||
if upgrade {
|
||||
res, err = hw.Upgrade(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
|
||||
} else {
|
||||
res, err = hw.Install(rls.Spec.ChartName, string(chartData), string(rls.Spec.Values))
|
||||
}
|
||||
if err != nil {
|
||||
return errors.New(res.Message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmRelease) uninstallHelmRelease(rls *v1alpha1.HelmRelease) error {
|
||||
if rls.Status.State != v1alpha1.HelmStatusDeleting {
|
||||
rls.Status.State = v1alpha1.HelmStatusDeleting
|
||||
rls.Status.LastUpdate = metav1.Now()
|
||||
err := r.Status().Update(context.TODO(), rls)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
clusterName := rls.GetRlsCluster()
|
||||
var clusterConfig string
|
||||
var err error
|
||||
if clusterName != "" && r.KsFactory != nil {
|
||||
clusterConfig, err = r.clusterClients.GetClusterKubeconfig(clusterName)
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster %s config failed", clusterConfig)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
hw := helmwrapper.NewHelmWrapper(clusterConfig, rls.GetRlsNamespace(), rls.Spec.Name, helmwrapper.SetMock(r.helmMock))
|
||||
|
||||
res, err := hw.Uninstall()
|
||||
|
||||
if err != nil {
|
||||
return errors.New(res.Message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupWithManager wires the HelmRelease controller into mgr. When a
// KubeSphere informer factory is configured it additionally builds the
// member-cluster clients, registers a "<cluster>/<namespace>" indexer on the
// HelmRelease informer, and — once this instance wins leader election —
// starts the background cleaner that removes releases whose namespace has
// been deleted.
func (r *ReconcileHelmRelease) SetupWithManager(mgr ctrl.Manager) error {
	r.Client = mgr.GetClient()
	if r.KsFactory != nil {
		r.clusterClients = clusterclient.NewClusterClient(r.KsFactory.Cluster().V1alpha1().Clusters())

		r.informer = r.KsFactory.Application().V1alpha1().HelmReleases().Informer()
		// Index releases by "<cluster>/<namespace>" so the cleaner can fetch
		// every release living in a given namespace of a given cluster.
		err := r.informer.AddIndexers(map[string]cache.IndexFunc{
			IndexerName: func(obj interface{}) ([]string, error) {
				rls := obj.(*v1alpha1.HelmRelease)
				return []string{fmt.Sprintf("%s/%s", rls.GetRlsCluster(), rls.GetRlsNamespace())}, nil
			},
		})
		if err != nil {
			return err
		}

		go func() {
			// Only the elected leader runs the cleaner.
			<-mgr.Elected()
			go r.cleanHelmReleaseWhenNamespaceDeleted()
		}()
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.HelmRelease{}).
		Complete(r)
}
|
||||
|
||||
func (r *ReconcileHelmRelease) getClusterConfig(cluster string) (string, error) {
|
||||
if cluster == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
clusterConfig, err := r.clusterClients.GetClusterKubeconfig(cluster)
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster %s config failed", clusterConfig)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return clusterConfig, nil
|
||||
}
|
||||
|
||||
// When namespace have been removed from member cluster, we need clean all
|
||||
// the helmRelease from the host cluster.
|
||||
func (r *ReconcileHelmRelease) cleanHelmReleaseWhenNamespaceDeleted() {
|
||||
|
||||
ticker := time.NewTicker(2 * time.Minute)
|
||||
for _ = range ticker.C {
|
||||
keys := r.informer.GetIndexer().ListIndexFuncValues(IndexerName)
|
||||
for _, clusterNs := range keys {
|
||||
klog.V(4).Infof("clean resource in %s", clusterNs)
|
||||
parts := stringutils.Split(clusterNs, "/")
|
||||
if len(parts) == 2 {
|
||||
cluster, ns := parts[0], parts[1]
|
||||
items, err := r.informer.GetIndexer().ByIndex(IndexerName, clusterNs)
|
||||
if err != nil {
|
||||
klog.Errorf("get items from index failed, error: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
kubeconfig, err := r.getClusterConfig(cluster)
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster %s config failed, error: %s", cluster, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// connect to member or host cluster
|
||||
var restConfig *restclient.Config
|
||||
if kubeconfig == "" {
|
||||
restConfig, err = restclient.InClusterConfig()
|
||||
} else {
|
||||
cc, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig))
|
||||
if err != nil {
|
||||
klog.Errorf("get client config for cluster %s failed, error: %s", cluster, err)
|
||||
continue
|
||||
}
|
||||
restConfig, err = cc.ClientConfig()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("build rest config for cluster %s failed, error: %s", cluster, err)
|
||||
continue
|
||||
}
|
||||
|
||||
clientSet, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
klog.Errorf("create client set failed, error: %s", err)
|
||||
continue
|
||||
}
|
||||
// check namespace exists or not
|
||||
namespace, err := clientSet.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.V(2).Infof("delete all helm release in %s", clusterNs)
|
||||
for ind := range items {
|
||||
rls := items[ind].(*v1alpha1.HelmRelease)
|
||||
err := r.Client.Delete(context.TODO(), rls)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("delete release %s failed", rls.Name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
klog.Errorf("get namespace %s from cluster %s failed, error: %s", ns, cluster, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
for ind := range items {
|
||||
rls := items[ind].(*v1alpha1.HelmRelease)
|
||||
if namespace.CreationTimestamp.After(rls.CreationTimestamp.Time) {
|
||||
klog.V(2).Infof("delete helm release %s in %s", rls.Namespace, clusterNs)
|
||||
// todo, namespace is newer than helmRelease, should we delete the helmRelease
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
307
pkg/controller/openpitrix/helmrepo/helm_repo_controller.go
Normal file
307
pkg/controller/openpitrix/helmrepo/helm_repo_controller.go
Normal file
@@ -0,0 +1,307 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmrepo
|
||||
|
||||
import (
|
||||
"context"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/utils/strings"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"math"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// min sync period in seconds; user-supplied smaller values are clamped
	// up to this in Reconcile.
	MinSyncPeriod = 180

	// Retry-delay bounds in seconds.
	MinRetryDuration = 60
	MaxRetryDuration = 600
	// HelmRepoSyncStateLen caps the sync-state history kept in status.
	HelmRepoSyncStateLen = 10

	StateSuccess = "successful"
	StateFailed  = "failed"
	// MessageLen caps error messages recorded in sync state.
	MessageLen = 512
)

const (
	// HelmRepoFinalizer blocks deletion of a HelmRepo until cleanup runs.
	HelmRepoFinalizer = "helmrepo.application.kubesphere.io"
)
|
||||
|
||||
// Add creates a new HelmRepo controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
|
||||
|
||||
// newReconciler returns a new reconcile.Reconciler
|
||||
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
|
||||
return &ReconcileHelmRepo{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
|
||||
recorder: mgr.GetEventRecorderFor("workspace-controller"),
|
||||
config: mgr.GetConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("helm-repo-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to HelmRepo objects (enqueue one request per object).
	err = c.Watch(&source.Kind{Type: &v1alpha1.HelmRepo{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
var _ reconcile.Reconciler = &ReconcileHelmRepo{}

// ReconcileHelmRepo reconciles a HelmRepo object: it syncs the repo index
// and maintains the repo's sync-state history in status.
type ReconcileHelmRepo struct {
	client.Client
	scheme   *runtime.Scheme
	recorder record.EventRecorder
	config   *rest.Config
}
|
||||
|
||||
// Reconcile reads that state of the cluster for a HelmRepo object and makes changes based on the state read
// and what is in the HelmRepo.Spec: it manages the finalizer, triggers an
// index sync when one is due, records the outcome in the sync-state history,
// and requeues for the next sync.
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmrepos,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=application.kubesphere.io,resources=helmrepos/status,verbs=get;update;patch
func (r *ReconcileHelmRepo) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	start := time.Now()
	klog.Infof("sync repo: %s", request.Name)
	defer func() {
		klog.Infof("sync repo end: %s, elapsed: %v", request.Name, time.Now().Sub(start))
	}()
	// Fetch the HelmRepo instance
	instance := &v1alpha1.HelmRepo{}
	err := r.Client.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}

	if instance.ObjectMeta.DeletionTimestamp.IsZero() {
		// New object: record the "syncing" state first; the status update
		// triggers the next reconcile.
		if instance.Status.State == "" {
			instance.Status.State = v1alpha1.RepoStateSyncing
			return reconcile.Result{}, r.Status().Update(context.Background(), instance)
		}

		// The object is not being deleted, so if it does not have our finalizer,
		// then lets add the finalizer and update the object.
		if !sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmRepoFinalizer) {
			instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, HelmRepoFinalizer)
			if err := r.Update(context.Background(), instance); err != nil {
				return reconcile.Result{}, err
			}
			return reconcile.Result{}, nil
		}
	} else {
		// The object is being deleted
		if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmRepoFinalizer) {
			// remove our finalizer from the list and update it.
			instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
				if item == HelmRepoFinalizer {
					return true
				}
				return false
			})
			if err := r.Update(context.Background(), instance); err != nil {
				return reconcile.Result{}, err
			}
		}
		return reconcile.Result{}, nil
	}

	// Work on a copy so spec clamping below never touches the stored object.
	copyInstance := instance.DeepCopy()

	// Clamp over-eager sync periods; 0 keeps its "no periodic sync" meaning.
	if copyInstance.Spec.SyncPeriod != 0 && copyInstance.Spec.SyncPeriod < MinSyncPeriod {
		copyInstance.Spec.SyncPeriod = MinSyncPeriod
	}

	retryAfter := 0
	if syncNow, after := needReSyncNow(copyInstance); syncNow {
		// sync repo
		syncErr := r.syncRepo(copyInstance)
		state := copyInstance.Status.SyncState
		now := metav1.Now()
		if syncErr != nil {
			// failed: prepend a failure record (newest first).
			state = append([]v1alpha1.HelmRepoSyncState{{
				State:    v1alpha1.RepoStateFailed,
				Message:  strings.ShortenString(syncErr.Error(), MessageLen),
				SyncTime: &now,
			}}, state...)
			copyInstance.Status.State = v1alpha1.RepoStateFailed
		} else {
			state = append([]v1alpha1.HelmRepoSyncState{{
				State:    v1alpha1.RepoStateSuccessful,
				SyncTime: &now,
			}}, state...)

			copyInstance.Status.Version = instance.Spec.Version
			copyInstance.Status.State = v1alpha1.RepoStateSuccessful
		}

		copyInstance.Status.LastUpdateTime = &now
		// Cap the history length.
		if len(state) > HelmRepoSyncStateLen {
			state = state[0:HelmRepoSyncStateLen]
		}
		copyInstance.Status.SyncState = state

		err = r.Client.Status().Update(context.TODO(), copyInstance)
		if err != nil {
			klog.Errorf("update status failed, error: %s", err)
			return reconcile.Result{
				RequeueAfter: MinRetryDuration * time.Second,
			}, err
		} else {
			// After a failed sync retry at the minimum period; after success
			// wait the configured period.
			retryAfter = MinSyncPeriod
			if syncErr == nil {
				retryAfter = copyInstance.Spec.SyncPeriod
			}
		}
	} else {
		retryAfter = after
	}

	return reconcile.Result{
		RequeueAfter: time.Duration(retryAfter) * time.Second,
	}, nil
}
|
||||
|
||||
// needReSyncNow checks instance whether need resync now
|
||||
// if resync is true, it should resync not
|
||||
// if resync is false and after > 0, it should resync in after seconds
|
||||
func needReSyncNow(instance *v1alpha1.HelmRepo) (syncNow bool, after int) {
|
||||
|
||||
now := time.Now()
|
||||
if instance.Status.SyncState == nil || len(instance.Status.SyncState) == 0 {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
states := instance.Status.SyncState
|
||||
|
||||
failedTimes := 0
|
||||
for i := range states {
|
||||
if states[i].State != StateSuccess {
|
||||
failedTimes += 1
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
state := states[0]
|
||||
|
||||
if instance.Spec.Version != instance.Status.Version && failedTimes == 0 {
|
||||
// repo has a successful synchronization
|
||||
diff := now.Sub(state.SyncTime.Time) / time.Second
|
||||
if diff > 0 && diff < MinRetryDuration {
|
||||
return false, int(math.Max(10, float64(MinRetryDuration-diff)))
|
||||
} else {
|
||||
return true, 0
|
||||
}
|
||||
}
|
||||
|
||||
period := 0
|
||||
if state.State != StateSuccess {
|
||||
period = MinRetryDuration * failedTimes
|
||||
if period > MaxRetryDuration {
|
||||
period = MaxRetryDuration
|
||||
}
|
||||
if now.After(state.SyncTime.Add(time.Duration(period) * time.Second)) {
|
||||
return true, 0
|
||||
}
|
||||
} else {
|
||||
period = instance.Spec.SyncPeriod
|
||||
if period != 0 {
|
||||
if period < MinSyncPeriod {
|
||||
period = MinSyncPeriod
|
||||
}
|
||||
if now.After(state.SyncTime.Add(time.Duration(period) * time.Second)) {
|
||||
return true, 0
|
||||
}
|
||||
} else {
|
||||
// need not to sync
|
||||
return false, 0
|
||||
}
|
||||
}
|
||||
|
||||
after = int(state.SyncTime.Time.Add(time.Duration(period) * time.Second).Sub(now).Seconds())
|
||||
|
||||
// may be less than 10 second
|
||||
if after <= 10 {
|
||||
after = 10
|
||||
}
|
||||
return false, after
|
||||
}
|
||||
|
||||
func (r *ReconcileHelmRepo) syncRepo(instance *v1alpha1.HelmRepo) error {
|
||||
// 1. load index from helm repo
|
||||
index, err := helmrepoindex.LoadRepoIndex(context.TODO(), instance.Spec.Url, &instance.Spec.Credential)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("load index failed, repo: %s, url: %s, err: %s", instance.GetTrueName(), instance.Spec.Url, err)
|
||||
return err
|
||||
}
|
||||
|
||||
existsSavedIndex := &helmrepoindex.SavedIndex{}
|
||||
if len(instance.Status.Data) != 0 {
|
||||
existsSavedIndex, err = helmrepoindex.ByteArrayToSavedIndex([]byte(instance.Status.Data))
|
||||
if err != nil {
|
||||
klog.Errorf("json unmarshal failed, repo: %s, error: %s", instance.GetTrueName(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 2. merge new index with old index which is stored in crd
|
||||
savedIndex := helmrepoindex.MergeRepoIndex(index, existsSavedIndex)
|
||||
|
||||
// 3. save index in crd
|
||||
data, err := savedIndex.Bytes()
|
||||
if err != nil {
|
||||
klog.Errorf("json marshal failed, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
instance.Status.Data = string(data)
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmrepo
|
||||
|
||||
import (
|
||||
"github.com/onsi/gomega/gexec"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/klogr"
|
||||
"kubesphere.io/kubesphere/pkg/apis"
|
||||
"os"
|
||||
"path/filepath"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
// Shared suite state, initialized in BeforeSuite and used by the specs.
var k8sClient client.Client      // client backed by the test control plane
var k8sManager ctrl.Manager      // manager running the controller under test
var testEnv *envtest.Environment // envtest control plane (or existing cluster)
|
||||
|
||||
// TestHelmRepoController is the `go test` entry point that hands the
// HelmRepo controller specs over to the Ginkgo runner.
func TestHelmRepoController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t,
		"HelmRepo Controller Test Suite",
		[]Reporter{printer.NewlineReporter{}})
}
|
||||
|
||||
var _ = BeforeSuite(func(done Done) {
|
||||
logf.SetLogger(klogr.New())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
t := true
|
||||
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
|
||||
testEnv = &envtest.Environment{
|
||||
UseExistingCluster: &t,
|
||||
}
|
||||
} else {
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")},
|
||||
AttachControlPlaneOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = apis.AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme.Scheme,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = Add(k8sManager)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
go func() {
|
||||
err = k8sManager.Start(ctrl.SetupSignalHandler())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
k8sClient = k8sManager.GetClient()
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
|
||||
close(done)
|
||||
}, 60)
|
||||
|
||||
// Suite teardown: kill any gexec-managed processes, then stop the envtest
// control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})
|
||||
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package helmrepo
|
||||
|
||||
import (
|
||||
"context"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"kubesphere.io/kubesphere/pkg/apis/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
"time"
|
||||
)
|
||||
|
||||
// repoUrl is the chart repository the test fixture points at. NOTE(review):
// this is a real public repository, so the spec needs network access.
var repoUrl = "https://charts.kubesphere.io/main"
|
||||
|
||||
var _ = Describe("helmRepo", func() {

	// timeout/interval govern how long Eventually polls for the sync to
	// finish; fetching a real chart index over the network can be slow.
	const timeout = time.Second * 360
	const interval = time.Second * 1

	repo := createRepo()

	BeforeEach(func() {
		// Create the HelmRepo CR; the controller under test should pick
		// it up and start syncing it.
		err := k8sClient.Create(context.Background(), repo)
		Expect(err).NotTo(HaveOccurred())
	})

	Context("Helm Repo Controller", func() {
		It("Should success", func() {
			// The lookup key sets no namespace — presumably HelmRepo is
			// cluster-scoped; confirm against the CRD definition.
			key := types.NamespacedName{
				Name: repo.Name,
			}

			By("Expecting repo state is successful")
			Eventually(func() bool {
				repo := &v1alpha1.HelmRepo{}
				// The Get error is deliberately ignored: while polling, a
				// transient failure just leaves `repo` zero-valued and the
				// predicate returns false, triggering another retry.
				k8sClient.Get(context.Background(), key, repo)
				return repo.Status.State == v1alpha1.RepoStateSuccessful && len(repo.Status.Data) > 0
			}, timeout, interval).Should(BeTrue())
		})
	})
})
||||
|
||||
func createRepo() *v1alpha1.HelmRepo {
|
||||
return &v1alpha1.HelmRepo{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmRepoIdPrefix),
|
||||
},
|
||||
Spec: v1alpha1.HelmRepoSpec{
|
||||
Url: repoUrl,
|
||||
},
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user