feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
Authored by KubeSphere CI Bot on 2024-09-06 11:05:52 +08:00, committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions


@@ -37,6 +37,7 @@ import (
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
multierror "github.com/hashicorp/go-multierror"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -68,6 +69,14 @@ var ManagedFieldsManager string
// Client represents a client capable of communicating with the Kubernetes API.
type Client struct {
// Factory provides a minimal version of the kubectl Factory interface. If
// you need the full Factory you can type switch to the full interface.
// Since Kubernetes Go API does not provide backwards compatibility across
// minor versions, this API does not follow Helm backwards compatibility.
// Helm is exposing Kubernetes in this property and cannot guarantee this
// will not change. The minimal interface only has the functions that Helm
// needs. The smaller surface area of the interface means there is a lower
// chance of it changing.
Factory Factory
Log func(string, ...interface{})
// Namespace allows bypassing the kubeconfig file for the choice of the namespace
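The "type switch to the full interface" note above can be made concrete with a short sketch. This is illustrative only: the cmdutil alias for k8s.io/kubectl/pkg/cmd/util, the upstream helm.sh/helm/v3/pkg/kube import path, and the fullFactory helper name are assumptions, not part of this change.

// Assumes imports: cmdutil "k8s.io/kubectl/pkg/cmd/util" and "helm.sh/helm/v3/pkg/kube".
// fullFactory is a hypothetical helper: the value Helm places in Client.Factory is
// built from kubectl's factory, so an interface type assertion can recover the wider
// cmdutil.Factory when a caller really needs it.
func fullFactory(c *kube.Client) (cmdutil.Factory, bool) {
    f, ok := c.Factory.(cmdutil.Factory)
    return f, ok
}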
@@ -337,13 +346,7 @@ func (c *Client) Build(reader io.Reader, validate bool) (ResourceList, error) {
validationDirective = metav1.FieldValidationStrict
}
dynamicClient, err := c.Factory.DynamicClient()
if err != nil {
return nil, err
}
verifier := resource.NewQueryParamVerifier(dynamicClient, c.Factory.OpenAPIGetter(), resource.QueryParamFieldValidation)
schema, err := c.Factory.Validator(validationDirective, verifier)
schema, err := c.Factory.Validator(validationDirective)
if err != nil {
return nil, err
}
@@ -363,13 +366,7 @@ func (c *Client) BuildTable(reader io.Reader, validate bool) (ResourceList, error) {
validationDirective = metav1.FieldValidationStrict
}
dynamicClient, err := c.Factory.DynamicClient()
if err != nil {
return nil, err
}
verifier := resource.NewQueryParamVerifier(dynamicClient, c.Factory.OpenAPIGetter(), resource.QueryParamFieldValidation)
schema, err := c.Factory.Validator(validationDirective, verifier)
schema, err := c.Factory.Validator(validationDirective)
if err != nil {
return nil, err
}
@@ -456,7 +453,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) {
c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, KeepPolicy)
continue
}
if err := deleteResource(info); err != nil {
if err := deleteResource(info, metav1.DeletePropagationBackground); err != nil {
c.Log("Failed to delete %q, err: %s", info.ObjectName(), err)
continue
}
@@ -465,17 +462,29 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, error) {
return res, nil
}
// Delete deletes Kubernetes resources specified in the resources list. It will
// attempt to delete all resources even if one or more fail and collect any
// errors. All successfully deleted items will be returned in the `Deleted`
// ResourceList that is part of the result.
// Delete deletes Kubernetes resources specified in the resources list with
// background cascade deletion. It will attempt to delete all resources even
// if one or more fail and collect any errors. All successfully deleted items
// will be returned in the `Deleted` ResourceList that is part of the result.
func (c *Client) Delete(resources ResourceList) (*Result, []error) {
return rdelete(c, resources, metav1.DeletePropagationBackground)
}
// Delete deletes Kubernetes resources specified in the resources list with
// given deletion propagation policy. It will attempt to delete all resources even
// if one or more fail and collect any errors. All successfully deleted items
// will be returned in the `Deleted` ResourceList that is part of the result.
func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) {
return rdelete(c, resources, policy)
}
func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) {
var errs []error
res := &Result{}
mtx := sync.Mutex{}
err := perform(resources, func(info *resource.Info) error {
c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
err := deleteResource(info)
err := deleteResource(info, propagation)
if err == nil || apierrors.IsNotFound(err) {
if err != nil {
c.Log("Ignoring delete failure for %q %s: %v", info.Name, info.Mapping.GroupVersionKind, err)
@@ -492,10 +501,8 @@ func (c *Client) Delete(resources ResourceList) (*Result, []error) {
return nil
})
if err != nil {
// Rewrite the message from "no objects visited" if that is what we got
// back
if err == ErrNoObjectsVisited {
err = errors.New("object not found, skipping delete")
if errors.Is(err, ErrNoObjectsVisited) {
err = fmt.Errorf("object not found, skipping delete: %w", err)
}
errs = append(errs, err)
}
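A minimal caller-side sketch of the new method, assuming the upstream helm.sh/helm/v3/pkg/kube import path, a configured *kube.Client and an already-built ResourceList; the helper name and log messages are illustrative.

// deleteReleaseResources shows the new entry point: foreground cascading makes the
// call wait for dependent objects to be removed before the owners are deleted.
func deleteReleaseResources(client *kube.Client, resources kube.ResourceList) {
    res, errs := client.DeleteWithPropagationPolicy(resources, metav1.DeletePropagationForeground)
    if len(errs) > 0 {
        for _, e := range errs {
            client.Log("delete failed: %v", e)
        }
        return
    }
    client.Log("deleted %d resource(s)", len(res.Deleted))
}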
@@ -532,6 +539,8 @@ func (c *Client) WatchUntilReady(resources ResourceList, timeout time.Duration) error {
}
func perform(infos ResourceList, fn func(*resource.Info) error) error {
var result error
if len(infos) == 0 {
return ErrNoObjectsVisited
}
@@ -542,10 +551,11 @@ func perform(infos ResourceList, fn func(*resource.Info) error) error {
for range infos {
err := <-errs
if err != nil {
return err
result = multierror.Append(result, err)
}
}
return nil
return result
}
// getManagedFieldsManager returns the manager string. If one was set it will be returned.
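The switch from returning the first error to go-multierror aggregation means every resource in the list is attempted before failures are reported. A self-contained sketch of the idiom, with made-up error values:

package main

import (
    "errors"
    "fmt"

    multierror "github.com/hashicorp/go-multierror"
)

func main() {
    // Nil errors are skipped; non-nil errors accumulate into a single value
    // instead of short-circuiting on the first failure, mirroring perform above.
    var result error
    for _, err := range []error{nil, errors.New("configmap a: not found"), errors.New("service b: forbidden")} {
        if err != nil {
            result = multierror.Append(result, err)
        }
    }
    fmt.Println(result) // prints both collected errors
}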
@@ -593,8 +603,7 @@ func createResource(info *resource.Info) error {
return info.Refresh(obj, true)
}
func deleteResource(info *resource.Info) error {
policy := metav1.DeletePropagationBackground
func deleteResource(info *resource.Info, policy metav1.DeletionPropagation) error {
opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
_, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts)
return err
@@ -763,7 +772,7 @@ func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) {
if c.Type == batch.JobComplete && c.Status == "True" {
return true, nil
} else if c.Type == batch.JobFailed && c.Status == "True" {
return true, errors.Errorf("job failed: %s", c.Reason)
return true, errors.Errorf("job %s failed: %s", name, c.Reason)
}
}


@@ -18,7 +18,6 @@ package kube // import "helm.sh/helm/v3/pkg/kube"
import (
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -27,6 +26,12 @@ import (
// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
// of resources and different API sets.
// This interface is a minimal copy of the kubectl Factory interface containing only the functions
// needed by Helm. Since Kubernetes Go APIs, including interfaces, can change in any minor release
// this interface is not covered by the Helm backwards compatibility guarantee. The reason for the
// minimal copy is that it does not include the full interface. Changes or additions to functions
// Helm does not need are not impacted or exposed. This minimizes the impact of Kubernetes changes
// being exposed.
type Factory interface {
// ToRawKubeConfigLoader return kubeconfig loader as-is
ToRawKubeConfigLoader() clientcmd.ClientConfig
@@ -42,7 +47,5 @@ type Factory interface {
NewBuilder() *resource.Builder
// Returns a schema that can validate objects stored on disk.
Validator(validationDirective string, verifier *resource.QueryParamVerifier) (validation.Schema, error)
// OpenAPIGetter returns a getter for the openapi schema document
OpenAPIGetter() discovery.OpenAPISchemaInterface
Validator(validationDirective string) (validation.Schema, error)
}
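For callers, the trimmed Validator signature removes the separate QueryParamVerifier step. A minimal sketch under stated assumptions: the buildInfos helper, the "manifest" source name, and the strict directive are illustrative, and the builder chain only mirrors what Helm's Build does internally.

func buildInfos(f kube.Factory, reader io.Reader) ([]*resource.Info, error) {
    // The field-validation directive is passed directly; the query-parameter
    // verifier is now created inside the kubectl machinery, not by the caller.
    schema, err := f.Validator(metav1.FieldValidationStrict)
    if err != nil {
        return nil, err
    }
    return f.NewBuilder().
        Unstructured().
        Schema(schema).
        Stream(reader, "manifest").
        Do().
        Infos()
}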


@@ -22,6 +22,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/resource"
@@ -37,10 +38,12 @@ type FailingKubeClient struct {
GetError error
WaitError error
DeleteError error
DeleteWithPropagationError error
WatchUntilReadyError error
UpdateError error
BuildError error
BuildTableError error
BuildDummy bool
BuildUnstructuredError error
WaitAndGetCompletedPodPhaseError error
WaitDuration time.Duration
@@ -116,6 +119,9 @@ func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) {
if f.BuildError != nil {
return []*resource.Info{}, f.BuildError
}
if f.BuildDummy {
return createDummyResourceList(), nil
}
return f.PrintingKubeClient.Build(r, false)
}
@@ -134,3 +140,21 @@ func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) {
}
return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d)
}
// DeleteWithPropagationPolicy returns the configured error if set or prints
func (f *FailingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) {
if f.DeleteWithPropagationError != nil {
return nil, []error{f.DeleteWithPropagationError}
}
return f.PrintingKubeClient.DeleteWithPropagationPolicy(resources, policy)
}
func createDummyResourceList() kube.ResourceList {
var resInfo resource.Info
resInfo.Name = "dummyName"
resInfo.Namespace = "dummyNamespace"
var resourceList kube.ResourceList
resourceList.Append(&resInfo)
return resourceList
}
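A sketch of how a test might drive the new knobs, assuming the upstream helm.sh/helm/v3/pkg/kube/fake import path; the test name and error text are illustrative.

func TestDeleteWithPropagationFailure(t *testing.T) {
    failer := &fake.FailingKubeClient{
        PrintingKubeClient:         fake.PrintingKubeClient{Out: io.Discard},
        BuildDummy:                 true,
        DeleteWithPropagationError: errors.New("simulated delete failure"),
    }
    // BuildDummy makes Build return the dummy resource list instead of parsing the reader.
    resources, _ := failer.Build(strings.NewReader(""), false)
    if _, errs := failer.DeleteWithPropagationPolicy(resources, metav1.DeletePropagationBackground); len(errs) == 0 {
        t.Fatal("expected the configured delete error to be returned")
    }
}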


@@ -22,6 +22,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/resource"
@@ -48,7 +49,7 @@ func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) {
return &kube.Result{Created: resources}, nil
}
func (p *PrintingKubeClient) Get(resources kube.ResourceList, related bool) (map[string][]runtime.Object, error) {
func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[string][]runtime.Object, error) {
_, err := io.Copy(p.Out, bufferize(resources))
if err != nil {
return nil, err
@@ -115,6 +116,17 @@ func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) {
return v1.PodSucceeded, nil
}
// DeleteWithPropagationPolicy implements KubeClient delete.
//
// It only prints out the content to be deleted.
func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) {
_, err := io.Copy(p.Out, bufferize(resources))
if err != nil {
return nil, []error{err}
}
return &kube.Result{Deleted: resources}, nil
}
func bufferize(resources kube.ResourceList) io.Reader {
var builder strings.Builder
for _, info := range resources {


@@ -21,6 +21,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -79,6 +80,14 @@ type InterfaceExt interface {
WaitForDelete(resources ResourceList, timeout time.Duration) error
}
// InterfaceDeletionPropagation is introduced to avoid breaking backwards compatibility for Interface implementers.
//
// TODO Helm 4: Remove InterfaceDeletionPropagation and integrate its method(s) into the Interface.
type InterfaceDeletionPropagation interface {
// Delete destroys one or more resources. The deletion propagation is handled as per the given deletion propagation value.
DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error)
}
// InterfaceResources is introduced to avoid breaking backwards compatibility for Interface implementers.
//
// TODO Helm 4: Remove InterfaceResources and integrate its method(s) into the Interface.
@@ -103,4 +112,5 @@ type InterfaceResources interface {
var _ Interface = (*Client)(nil)
var _ InterfaceExt = (*Client)(nil)
var _ InterfaceDeletionPropagation = (*Client)(nil)
var _ InterfaceResources = (*Client)(nil)
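Because DeleteWithPropagationPolicy lives on a separate interface until Helm 4, consumers that only hold an Interface value can feature-detect it at runtime. A hedged sketch; deleteWithPolicy is a hypothetical helper, not part of this change.

func deleteWithPolicy(kubeClient kube.Interface, resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) {
    // If the implementation supports it, honour the requested propagation policy.
    if deleter, ok := kubeClient.(kube.InterfaceDeletionPropagation); ok {
        return deleter.DeleteWithPropagationPolicy(resources, policy)
    }
    // Older implementations only expose Delete, which uses background propagation.
    return kubeClient.Delete(resources)
}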


@@ -18,6 +18,7 @@ package kube // import "helm.sh/helm/v3/pkg/kube"
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
appsv1beta1 "k8s.io/api/apps/v1beta1"
@@ -83,19 +84,12 @@ type ReadyChecker struct {
// IsReady checks if v is ready. It supports checking readiness for pods,
// deployments, persistent volume claims, services, daemon sets, custom
// resource definitions, stateful sets, replication controllers, and replica
// sets. All other resource kinds are always considered ready.
// resource definitions, stateful sets, replication controllers, jobs (optional),
// and replica sets. All other resource kinds are always considered ready.
//
// IsReady will fetch the latest state of the object from the server prior to
// performing readiness checks, and it will return any error encountered.
func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) {
var (
// This defaults to true, otherwise we get to a point where
// things will always return false unless one of the objects
// that manages pods has been hit
ok = true
err error
)
switch value := AsVersioned(v).(type) {
case *corev1.Pod:
pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
@@ -105,9 +99,11 @@ func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) {
case *batchv1.Job:
if c.checkJobs {
job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
if err != nil || !c.jobReady(job) {
if err != nil {
return false, err
}
ready, err := c.jobReady(job)
return ready, err
}
case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment:
currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
@@ -180,11 +176,30 @@ func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) {
if !c.statefulSetReady(sts) {
return false, nil
}
case *corev1.ReplicationController, *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet:
ok, err = c.podsReadyForObject(ctx, v.Namespace, value)
}
if !ok || err != nil {
return false, err
case *corev1.ReplicationController:
rc, err := c.client.CoreV1().ReplicationControllers(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if !c.replicationControllerReady(rc) {
return false, nil
}
ready, err := c.podsReadyForObject(ctx, v.Namespace, value)
if !ready || err != nil {
return false, err
}
case *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet:
rs, err := c.client.AppsV1().ReplicaSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if !c.replicaSetReady(rs) {
return false, nil
}
ready, err := c.podsReadyForObject(ctx, v.Namespace, value)
if !ready || err != nil {
return false, err
}
}
return true, nil
}
@@ -222,16 +237,17 @@ func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool {
return false
}
func (c *ReadyChecker) jobReady(job *batchv1.Job) bool {
func (c *ReadyChecker) jobReady(job *batchv1.Job) (bool, error) {
if job.Status.Failed > *job.Spec.BackoffLimit {
c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName())
return false
// If a job has failed, it can't recover, so return an error
return false, fmt.Errorf("job is failed: %s/%s", job.GetNamespace(), job.GetName())
}
if job.Spec.Completions != nil && job.Status.Succeeded < *job.Spec.Completions {
c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName())
return false
return false, nil
}
return true
return true, nil
}
func (c *ReadyChecker) serviceReady(s *corev1.Service) bool {
@@ -272,6 +288,16 @@ func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool {
}
func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
// Verify the replicaset readiness
if !c.replicaSetReady(rs) {
return false
}
// Verify the generation observed by the deployment controller matches the spec generation
if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation {
c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation)
return false
}
expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep)
if !(rs.Status.ReadyReplicas >= expectedReady) {
c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady)
@@ -281,6 +307,12 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool {
}
func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool {
// Verify the generation observed by the daemonSet controller matches the spec generation
if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation {
c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
return true
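The same observedGeneration guard recurs below for StatefulSets, ReplicationControllers and ReplicaSets. A tiny sketch of the underlying idea, with a hypothetical helper name:

// metadata.generation is bumped by the API server on every spec change, and a
// controller records the generation it has processed in status.observedGeneration.
// If the two differ, the reported status may still describe a stale spec, so the
// resource is not treated as ready yet.
func generationObserved(generation, observedGeneration int64) bool {
    return observedGeneration == generation
}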
@@ -351,22 +383,22 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool {
}
func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
// Verify the generation observed by the statefulSet controller matches the spec generation
if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation)
return false
}
// If the update strategy is not a rolling update, there will be nothing to wait for
if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type)
return true
}
// Make sure the status is up-to-date with the StatefulSet changes
if sts.Status.ObservedGeneration < sts.Generation {
c.log("StatefulSet is not ready: %s/%s. update has not yet been observed", sts.Namespace, sts.Name)
return false
}
// Dereference all the pointers because StatefulSets like them
var partition int
// 1 is the default for replicas if not set
var replicas = 1
replicas := 1
// For some reason, even if the update strategy is a rolling update, the
// actual rollingUpdate field can be nil. If it is, we can safely assume
// there is no partition value
@@ -393,8 +425,10 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas)
return false
}
if sts.Status.CurrentRevision != sts.Status.UpdateRevision {
// This check only makes sense when all partitions are being upgraded; otherwise, during a
// partitioned rolling upgrade, this condition will never evaluate to true, leading to an
// error.
if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
c.log("StatefulSet is not ready: %s/%s. currentRevision %s does not yet match updateRevision %s", sts.Namespace, sts.Name, sts.Status.CurrentRevision, sts.Status.UpdateRevision)
return false
}
@@ -403,6 +437,24 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool {
return true
}
func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool {
// Verify the generation observed by the replicationController controller matches the spec generation
if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation {
c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation)
return false
}
return true
}
func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool {
// Verify the generation observed by the replicaSet controller matches the spec generation
if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation {
c.log("ReplicaSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation)
return false
}
return true
}
func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) {
list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: selector,
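For context, a sketch of how these checks are typically driven, assuming the exported NewReadyChecker constructor and CheckJobs option from this package (names as in upstream Helm); the checkRelease helper is illustrative.

func checkRelease(ctx context.Context, clientset kubernetes.Interface, infos kube.ResourceList) (bool, error) {
    checker := kube.NewReadyChecker(clientset, log.Printf, kube.CheckJobs(true))
    for _, info := range infos {
        ready, err := checker.IsReady(ctx, info)
        if err != nil {
            // With the jobReady change above, a Job past its backoff limit now
            // surfaces here as an error instead of being reported as merely not ready.
            return false, err
        }
        if !ready {
            return false, nil
        }
    }
    return true, nil
}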


@@ -22,5 +22,6 @@ const ResourcePolicyAnno = "helm.sh/resource-policy"
// KeepPolicy is the resource policy type for keep
//
// This resource policy type allows resources to skip being deleted
// during an uninstallRelease action.
//
// during an uninstallRelease action.
const KeepPolicy = "keep"


@@ -50,7 +50,7 @@ func (w *waiter) waitForResources(created ResourceList) error {
ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
defer cancel()
return wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
for _, v := range created {
ready, err := w.c.IsReady(ctx, v)
if !ready || err != nil {
@@ -58,7 +58,7 @@ func (w *waiter) waitForResources(created ResourceList) error {
}
}
return true, nil
}, ctx.Done())
})
}
// waitForDeletedResources polls to check if all the resources are deleted or a timeout is reached
@@ -68,7 +68,7 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
defer cancel()
return wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
return wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
for _, v := range deleted {
err := v.Get()
if err == nil || !apierrors.IsNotFound(err) {
@@ -76,7 +76,7 @@ func (w *waiter) waitForDeletedResources(deleted ResourceList) error {
}
}
return true, nil
}, ctx.Done())
})
}
// SelectorsForObject returns the pod label selector for a given object
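The polling change follows the deprecation of wait.PollImmediateUntil in k8s.io/apimachinery. A minimal before/after sketch with a hypothetical allReady condition function:

ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

// Deprecated form: interval, a context-free ConditionFunc, and a stop channel.
//   err := wait.PollImmediateUntil(2*time.Second, func() (bool, error) { return allReady() }, ctx.Done())

// Replacement: the context carries cancellation and the deadline, and the
// immediate argument (true) preserves the old check-before-sleeping behaviour.
err := wait.PollUntilContextCancel(ctx, 2*time.Second, true, func(ctx context.Context) (bool, error) {
    return allReady()
})
if err != nil {
    // the context expired or allReady returned an error
}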