Implement the helm executor package (#5245)

* Implement the helm executor package

* add createNamespace option and delete service account related logic
This commit is contained in:
Xinzhao Xu
2022-09-29 14:25:24 +08:00
committed by GitHub
parent 978bd5576e
commit ac3ecb5a96
5 changed files with 1935 additions and 8 deletions

View File

@@ -0,0 +1,136 @@
module kubesphere.io/utils
go 1.18
require (
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.9.4
k8s.io/api v0.24.6
k8s.io/apimachinery v0.24.6
k8s.io/cli-runtime v0.24.6
k8s.io/client-go v0.24.6
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20220922133306-665eaaec4324
sigs.k8s.io/controller-runtime v0.12.3
sigs.k8s.io/kustomize/api v0.12.1
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
github.com/Masterminds/squirrel v1.5.3 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/containerd/containerd v1.6.6 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v20.10.17+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.17+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-gorp/gorp/v3 v3.0.2 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.2.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rubenv/sql-migrate v1.1.1 // indirect
github.com/russross/blackfriday v1.5.2 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/cobra v1.4.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/apiserver v0.24.2 // indirect
k8s.io/component-base v0.24.2 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20220627174259-011e075b9cb8 // indirect
k8s.io/kubectl v0.24.2 // indirect
oras.land/oras-go v1.2.0 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,37 @@
# Copyright 2022 The KubeSphere Authors. All rights reserved.
# Use of this source code is governed by an Apache license
# that can be found in the LICENSE file.
# Download dependencies
# Stage 1: fetch helm and kustomize binaries into ${OUTDIR} so the final
# stage can copy them in a single layer.
FROM alpine:3.16 as base_os_context
ARG TARGETARCH=amd64
ARG TARGETOS=linux
ARG HELM_VERSION=v3.5.2
ARG KUSTOMIZE_VERSION=v4.2.0
ENV OUTDIR=/out
RUN mkdir -p ${OUTDIR}/usr/local/bin
WORKDIR /tmp
# NOTE(review): ca-certificates is installed in this stage only; the final
# stage copies /out alone, so the certs do not reach the final image — confirm
# whether the final image needs them (e.g. for helm repo access over HTTPS).
RUN apk add --no-cache ca-certificates
# Install helm
ADD https://get.helm.sh/helm-${HELM_VERSION}-${TARGETOS}-${TARGETARCH}.tar.gz /tmp
RUN tar xvzf /tmp/helm-${HELM_VERSION}-${TARGETOS}-${TARGETARCH}.tar.gz -C /tmp
RUN mv /tmp/${TARGETOS}-${TARGETARCH}/helm ${OUTDIR}/usr/local/bin/
# Install kustomize
# (the kustomize release tag contains a slash: kustomize/vX.Y.Z)
ADD https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_${TARGETOS}_${TARGETARCH}.tar.gz /tmp
RUN tar xvzf /tmp/kustomize_${KUSTOMIZE_VERSION}_${TARGETOS}_${TARGETARCH}.tar.gz -C /tmp
RUN mv /tmp/kustomize ${OUTDIR}/usr/local/bin/
# Final Image
FROM alpine:3.16
COPY --from=base_os_context /out/ /
WORKDIR /
CMD ["sh"]

View File

@@ -17,8 +17,27 @@ limitations under the License.
package helm
import (
"bytes"
"context"
"errors"
"fmt"
"path/filepath"
"time"
"gopkg.in/yaml.v3"
"helm.sh/helm/v3/pkg/action"
helmrelease "helm.sh/helm/v3/pkg/release"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/kustomize/api/types"
)
// Executor is used to manage a helm release, you can install/uninstall and upgrade a chart
@@ -36,41 +55,465 @@ type Executor interface {
IsReleaseReady(timeout time.Duration) (bool, error)
}
const (
// workspaceBaseSource is the mount point of the ConfigMap that carries the
// chart archive, values and helper scripts into the executor Job.
workspaceBaseSource = "/tmp/helm-executor-source"
// workspaceBase is the writable working directory the Job copies the
// source files into before running helm.
workspaceBase = "/tmp/helm-executor"
// statusNotFoundFormat is the error text helm returns when a release does not exist.
statusNotFoundFormat = "release: not found"
// releaseExists is the error message returned when installing over an existing release.
releaseExists = "release exists"
// kustomizationFile is the kustomize config file name inside the workspace.
kustomizationFile = "kustomization.yaml"
// postRenderExecFile is the helm post-renderer script name inside the workspace.
postRenderExecFile = "helm-post-render.sh"
// kustomize cannot read stdio now, so we save helm stdout to a file, then kustomize reads that file and builds the resources
kustomizeBuild = `#!/bin/sh
# save helm stdout to file, then kustomize read this file
cat > ./.local-helm-output.yaml
kustomize build
`
)
var (
// errorTimedOutToWaitResource is returned by IsReleaseReady when the
// release resources do not become ready within the caller-supplied timeout.
errorTimedOutToWaitResource = errors.New("timed out waiting for resources to be ready")
)
// executor implements the Executor interface: it manages a helm release by
// launching Jobs in the target cluster that run the helm CLI.
type executor struct {
// target cluster client
client.Client
// raw kube config data of the target cluster; empty means use in-cluster defaults
kubeConfig string
// namespace the release lives in
namespace string
// helm release name
releaseName string
// helm action Config
helmConf *action.Configuration
// container image used by the executor Jobs
helmImage string
// add labels to helm chart
labels map[string]string
// add annotations to helm chart
annotations map[string]string
// create the release namespace during NewExecutor if it does not exist
createNamespace bool
// pass --dry-run to helm commands
dryRun bool
}
// Option is a functional option that configures an executor during construction.
type Option func(*executor)
// SetDryRun configures whether helm commands are executed with --dry-run.
func SetDryRun(dryRun bool) Option {
	return func(e *executor) { e.dryRun = dryRun }
}
// SetAnnotations configures extra annotations added to all resources in the chart.
func SetAnnotations(annotations map[string]string) Option {
	return func(e *executor) { e.annotations = annotations }
}
// SetLabels configures extra labels added to all resources in the chart.
func SetLabels(labels map[string]string) Option {
	return func(e *executor) { e.labels = labels }
}
// SetHelmImage configures the container image used by the executor Jobs.
func SetHelmImage(helmImage string) Option {
	return func(e *executor) { e.helmImage = helmImage }
}
// SetKubeConfig configures the kube config data of the target cluster.
func SetKubeConfig(kubeConfig string) Option {
	return func(e *executor) { e.kubeConfig = kubeConfig }
}
// SetCreateNamespace configures whether the release namespace is created on demand.
func SetCreateNamespace(createNamespace bool) Option {
	return func(e *executor) { e.createNamespace = createNamespace }
}
// NewExecutor generates a new Executor instance with the following parameters:
// - kubeConfig: kube config data of the target cluster
// - namespace: the namespace of the helm release
// - releaseName: the helm release name
// - options: functions to set optional parameters
func NewExecutor(kubeConfig, namespace, releaseName string, options ...Option) (Executor, error) {
return &executor{}, nil
func NewExecutor(namespace, releaseName string, options ...Option) (Executor, error) {
e := &executor{
namespace: namespace,
releaseName: releaseName,
helmImage: "kubesphere/helm:latest",
}
for _, option := range options {
option(e)
}
restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(e.kubeConfig))
if err != nil {
return nil, err
}
clusterClient, err := client.New(restConfig, client.Options{})
if err != nil {
return nil, err
}
e.Client = clusterClient
klog.V(8).Infof("namespace: %s, release name: %s, kube config:%s", e.namespace, e.releaseName, e.kubeConfig)
getter := NewClusterRESTClientGetter(e.kubeConfig, e.namespace)
e.helmConf = new(action.Configuration)
if err = e.helmConf.Init(getter, e.namespace, "", klog.Infof); err != nil {
return nil, err
}
if e.createNamespace {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: e.namespace,
},
}
if err = e.Create(context.Background(), ns); err != nil && !apierrors.IsAlreadyExists(err) {
return e, err
}
}
return e, nil
}
// Install installs the specified chart, returns the name of the Job that executed the task.
// Installing a release that is already deployed is a no-op (returns "", nil);
// a release in any other existing state yields a "release exists" error.
func (e *executor) Install(ctx context.Context, chartName, chartData, values string) (string, error) {
	sts, err := e.status()
	if err != nil {
		if err.Error() == statusNotFoundFormat {
			// Release does not exist yet: continue to install.
			return e.createInstallJob(ctx, chartName, chartData, values, false)
		}
		return "", err
	}
	// The helm release has already been installed successfully.
	if sts.Info != nil && sts.Info.Status == helmrelease.StatusDeployed {
		return "", nil
	}
	return "", errors.New(releaseExists)
}
// Upgrade upgrades the specified chart, returns the name of the Job that executed the task.
// Only a release currently in the deployed state can be upgraded.
func (e *executor) Upgrade(ctx context.Context, chartName, chartData, values string) (string, error) {
	sts, err := e.status()
	if err != nil {
		return "", err
	}
	if sts.Info.Status == helmrelease.StatusDeployed {
		return e.createInstallJob(ctx, chartName, chartData, values, true)
	}
	return "", fmt.Errorf("cannot upgrade release %s/%s, current state is %s", e.namespace, e.releaseName, sts.Info.Status)
}
// kubeConfigPath returns the relative path of the kube config file inside the
// Job working directory, or "" when no kube config data was provided.
func (e *executor) kubeConfigPath() string {
	if e.kubeConfig == "" {
		return ""
	}
	return "kube.config"
}
// chartPath returns the file name under which the chart archive is stored in
// the workspace.
func (e *executor) chartPath(chartName string) string {
	return chartName + ".tgz"
}
// setupChartData assembles the ConfigMap payload for an install/upgrade Job:
// the chart archive, the values file, the kube config (when provided) and —
// only when extra labels/annotations are configured — the kustomize
// post-render files.
//
// BUG FIX: the previous early return produced a nil map whenever no labels or
// annotations were set, which left the chart archive, values and kube config
// out of the ConfigMap entirely and the Job with nothing to install.
func (e *executor) setupChartData(chartName, chartData, values string) (map[string]string, error) {
	data := map[string]string{
		e.chartPath(chartName): chartData,
		"values.yaml":          values,
	}
	if e.kubeConfigPath() != "" {
		data[e.kubeConfigPath()] = e.kubeConfig
	}

	// Generate the kustomize post-render files only when there is something to add.
	if len(e.labels) > 0 || len(e.annotations) > 0 {
		kustomizationConfig := types.Kustomization{
			Resources:         []string{"./.local-helm-output.yaml"},
			CommonAnnotations: e.annotations,              // add extra annotations to output
			Labels:            []types.Label{{Pairs: e.labels}}, // Labels to add to all objects but not selectors.
		}
		kustomizationData, err := yaml.Marshal(kustomizationConfig)
		if err != nil {
			return nil, err
		}
		data[postRenderExecFile] = kustomizeBuild
		data[kustomizationFile] = string(kustomizationData)
	}
	return data, nil
}
func generateName(name string) string {
return fmt.Sprintf("helm-executor-%s-%s", name, rand.String(6))
}
// createConfigMap stores the chart payload (archive, values, optional kube
// config and post-render files) in a new ConfigMap and returns its name.
func (e *executor) createConfigMap(ctx context.Context, chartName, chartData, values string) (string, error) {
	data, err := e.setupChartData(chartName, chartData, values)
	if err != nil {
		return "", err
	}

	name := generateName(chartName)
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: e.namespace, Name: name},
		Data:       data,
	}
	if err = e.Create(ctx, cm); err != nil {
		return "", err
	}
	return name, nil
}
// createInstallJob launches a Job that runs `helm install` (or `helm upgrade`
// when upgrade is true) for the given chart. The chart payload is delivered
// via a ConfigMap mounted read-only at workspaceBaseSource and copied into a
// writable emptyDir at workspaceBase by a postStart hook. Returns the shared
// Job/ConfigMap name.
func (e *executor) createInstallJob(ctx context.Context, chartName, chartData, values string, upgrade bool) (string, error) {
args := make([]string, 0, 10)
if upgrade {
args = append(args, "upgrade")
} else {
args = append(args, "install")
}
// --wait makes helm block until the release resources are ready.
args = append(args, "--wait", e.releaseName, e.chartPath(chartName), "--namespace", e.namespace)
if len(values) > 0 {
args = append(args, "--values", "values.yaml")
}
if e.dryRun {
args = append(args, "--dry-run")
}
if e.kubeConfigPath() != "" {
args = append(args, "--kubeconfig", e.kubeConfigPath())
}
// Post render, add annotations or labels to resources
if len(e.labels) > 0 || len(e.annotations) > 0 {
args = append(args, "--post-renderer", filepath.Join(workspaceBase, postRenderExecFile))
}
if klog.V(8) {
// output debug info
args = append(args, "--debug")
}
// The ConfigMap and the Job deliberately share the same generated name.
name, err := e.createConfigMap(ctx, chartName, chartData, values)
if err != nil {
return "", err
}
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: e.namespace,
},
Spec: batchv1.JobSpec{
BackoffLimit: pointer.Int32(1),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "helm",
Image: e.helmImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"helm"},
Args: args,
WorkingDir: workspaceBase,
VolumeMounts: []corev1.VolumeMount{
{
Name: "source",
MountPath: workspaceBaseSource,
},
{
Name: "data",
MountPath: workspaceBase,
},
},
// ConfigMap mounts are read-only, so copy the source files into the
// writable workspace before helm runs.
Lifecycle: &corev1.Lifecycle{
PostStart: &corev1.LifecycleHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", fmt.Sprintf("cp -r %s/. %s", workspaceBaseSource, workspaceBase)},
},
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "source",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: name,
},
// 0755 so the post-render shell script is executable.
DefaultMode: pointer.Int32(0755),
},
},
},
{
Name: "data",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
RestartPolicy: corev1.RestartPolicyNever,
TerminationGracePeriodSeconds: new(int64),
},
},
},
}
if err = e.Create(ctx, job); err != nil {
return "", err
}
return name, nil
}
// Uninstall uninstalls the specified chart, returns the name of the Job that executed the task.
// A release that is already absent is a no-op (returns "", nil).
func (e *executor) Uninstall(ctx context.Context) (string, error) {
	if _, err := e.status(); err != nil && err.Error() == statusNotFoundFormat {
		// already uninstalled
		return "", nil
	}

	args := []string{
		"uninstall",
		e.releaseName,
		"--namespace",
		e.namespace,
	}
	if e.dryRun {
		args = append(args, "--dry-run")
	}

	name := generateName(e.releaseName)
	// When a kube config was provided, ship it to the Job via a ConfigMap that
	// shares the Job's name.
	if e.kubeConfigPath() != "" {
		args = append(args, "--kubeconfig", e.kubeConfigPath())
		configMap := &corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: e.namespace,
			},
			Data: map[string]string{
				e.kubeConfigPath(): e.kubeConfig,
			},
		}
		if err := e.Create(ctx, configMap); err != nil {
			return "", err
		}
	}

	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: e.namespace,
		},
		Spec: batchv1.JobSpec{
			BackoffLimit: pointer.Int32(1),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            "helm",
							Image:           e.helmImage,
							ImagePullPolicy: corev1.PullIfNotPresent,
							Command:         []string{"helm"},
							Args:            args,
							WorkingDir:      workspaceBase,
						},
					},
					RestartPolicy:                 corev1.RestartPolicyNever,
					TerminationGracePeriodSeconds: new(int64),
				},
			},
		},
	}
	// Mount the kube config ConfigMap only when one was created above.
	if e.kubeConfigPath() != "" {
		job.Spec.Template.Spec.Volumes = []corev1.Volume{
			{
				Name: "data",
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: name,
						},
						DefaultMode: pointer.Int32(0755),
					},
				},
			},
		}
		job.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
			{
				Name:      "data",
				MountPath: workspaceBase,
			},
		}
	}
	if err := e.Create(ctx, job); err != nil {
		return "", err
	}
	return name, nil
}
// Manifest returns the manifest data for this release, queried from the
// cluster via the helm "get" action.
func (e *executor) Manifest() (string, error) {
	get := action.NewGet(e.helmConf)
	rel, err := get.Run(e.releaseName)
	if err != nil {
		klog.Errorf("namespace: %s, name: %s, run command failed, error: %v", e.namespace, e.releaseName, err)
		return "", err
	}
	klog.V(2).Infof("namespace: %s, name: %s, run command success", e.namespace, e.releaseName)
	klog.V(8).Infof("namespace: %s, name: %s, run command success, manifest: %s", e.namespace, e.releaseName, rel.Manifest)
	return rel.Manifest, nil
}
// IsReleaseReady checks if all resources of the helm release become ready
// within the given timeout. A timeout is reported as
// errorTimedOutToWaitResource.
func (e *executor) IsReleaseReady(timeout time.Duration) (bool, error) {
	// Get the manifest to build resources.
	manifest, err := e.Manifest()
	if err != nil {
		return false, err
	}

	kubeClient := e.helmConf.KubeClient
	// The previous code discarded the Build error, which could hand a nil
	// resource list to Wait; surface it instead.
	resources, err := kubeClient.Build(bytes.NewBufferString(manifest), true)
	if err != nil {
		return false, err
	}
	if err = kubeClient.Wait(resources, timeout); err == nil {
		return true, nil
	}
	if errors.Is(err, wait.ErrWaitTimeout) {
		return false, errorTimedOutToWaitResource
	}
	return false, err
}
// status queries the current state of the release through the helm "status"
// action. A missing release surfaces as an error whose text matches
// statusNotFoundFormat.
func (e *executor) status() (*helmrelease.Release, error) {
	rel, err := action.NewStatus(e.helmConf).Run(e.releaseName)
	if err != nil {
		// "release: not found" is an expected condition; log it quietly.
		if err.Error() == statusNotFoundFormat {
			klog.V(2).Infof("namespace: %s, name: %s, run command failed, error: %v", e.namespace, e.releaseName, err)
		} else {
			klog.Errorf("namespace: %s, name: %s, run command failed, error: %v", e.namespace, e.releaseName, err)
		}
		return nil, err
	}
	klog.V(2).Infof("namespace: %s, name: %s, run command success", e.namespace, e.releaseName)
	klog.V(8).Infof("namespace: %s, name: %s, run command success, manifest: %s", e.namespace, e.releaseName, rel.Manifest)
	return rel, nil
}

View File

@@ -0,0 +1,87 @@
/*
Copyright 2022 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/discovery"
memory "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)
// NewClusterRESTClientGetter returns a RESTClientGetter for the target
// cluster: an in-memory getter when kube config data is supplied, otherwise
// the default config flags (in-cluster / local kubeconfig).
func NewClusterRESTClientGetter(kubeconfig, namespace string) genericclioptions.RESTClientGetter {
	if kubeconfig == "" {
		flags := genericclioptions.NewConfigFlags(true)
		flags.Namespace = &namespace
		return flags
	}
	return NewMemoryRESTClientGetter([]byte(kubeconfig), namespace)
}
// MemoryRESTClientGetter is an implementation of the
// genericclioptions.RESTClientGetter that serves a kube config held entirely
// in memory, never touching the filesystem.
type MemoryRESTClientGetter struct {
// raw kube config bytes
kubeConfig []byte
// default namespace applied to the client config
namespace string
}
// NewMemoryRESTClientGetter wraps raw kube config bytes and a namespace in a
// MemoryRESTClientGetter.
func NewMemoryRESTClientGetter(kubeConfig []byte, namespace string) genericclioptions.RESTClientGetter {
	return &MemoryRESTClientGetter{kubeConfig: kubeConfig, namespace: namespace}
}
// ToRESTConfig builds a rest.Config from the in-memory kube config bytes.
func (c *MemoryRESTClientGetter) ToRESTConfig() (*rest.Config, error) {
	return clientcmd.RESTConfigFromKubeConfig(c.kubeConfig)
}
// ToDiscoveryClient returns a memory-cached discovery client for the cluster.
// The previous code discarded the error from NewDiscoveryClientForConfig,
// which could wrap a nil client into the cache; propagate it instead.
func (c *MemoryRESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
	config, err := c.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return nil, err
	}
	return memory.NewMemCacheClient(discoveryClient), nil
}
// ToRESTMapper returns a RESTMapper with shortcut expansion, backed by the
// cached discovery client.
func (c *MemoryRESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
	dc, err := c.ToDiscoveryClient()
	if err != nil {
		return nil, err
	}
	deferredMapper := restmapper.NewDeferredDiscoveryRESTMapper(dc)
	return restmapper.NewShortcutExpander(deferredMapper, dc), nil
}
// ToRawKubeConfigLoader returns a deferred-loading client config built from
// the default loading rules, pinned to the getter's namespace.
func (c *MemoryRESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
	overrides.Context.Namespace = c.namespace
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
}