feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
KubeSphere CI Bot
2024-09-06 11:05:52 +08:00
committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +1,6 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster

View File

@@ -1,85 +1,88 @@
/*
Copyright 2020 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"context"
"errors"
"fmt"
"net/http"
"strings"
kscontroller "kubesphere.io/kubesphere/pkg/controller"
v1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
)
type ValidatingHandler struct {
Client client.Client
decoder *admission.Decoder
const webhookName = "cluster-webhook"
func (v *Webhook) Name() string {
return webhookName
}
var _ admission.DecoderInjector = &ValidatingHandler{}
// InjectDecoder injects the decoder into a ValidatingHandler.
func (h *ValidatingHandler) InjectDecoder(d *admission.Decoder) error {
h.decoder = d
return nil
func (v *Webhook) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}
// Handle handles admission requests.
func (h *ValidatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
if req.Operation != v1.Update {
return admission.Allowed("")
}
var _ kscontroller.Controller = &Webhook{}
var _ admission.CustomValidator = &Webhook{}
newCluster := &clusterv1alpha1.Cluster{}
if err := h.decoder.DecodeRaw(req.Object, newCluster); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
type Webhook struct {
}
oldCluster := &clusterv1alpha1.Cluster{}
if err := h.decoder.DecodeRaw(req.OldObject, oldCluster); err != nil {
return admission.Errored(http.StatusBadRequest, err)
func (v *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
return builder.WebhookManagedBy(mgr).
For(&clusterv1alpha1.Cluster{}).
WithValidator(v).
Complete()
}
func (v *Webhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
func (v *Webhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
oldCluster, ok := oldObj.(*clusterv1alpha1.Cluster)
if !ok {
return nil, fmt.Errorf("expected a Cluster but got a %T", oldObj)
}
newCluster, ok := newObj.(*clusterv1alpha1.Cluster)
if !ok {
return nil, fmt.Errorf("expected a Cluster but got a %T", newObj)
}
// The cluster created for the first time has no status information
if oldCluster.Status.UID == "" {
return admission.Allowed("")
return nil, nil
}
clusterConfig, err := clientcmd.RESTConfigFromKubeConfig(newCluster.Spec.Connection.KubeConfig)
if err != nil {
return admission.Denied(fmt.Sprintf("failed to load cluster config for %s: %s", newCluster.Name, err))
return nil, fmt.Errorf("failed to load cluster config for %s: %s", newCluster.Name, err)
}
clusterClient, err := kubernetes.NewForConfig(clusterConfig)
if err != nil {
return admission.Denied(err.Error())
return nil, err
}
kubeSystem, err := clusterClient.CoreV1().Namespaces().Get(ctx, metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
return admission.Denied(err.Error())
return nil, err
}
if oldCluster.Status.UID != kubeSystem.UID {
return admission.Denied("this kubeconfig corresponds to a different cluster than the previous one, you need to make sure that kubeconfig is not from another cluster")
return nil, errors.New("this kubeconfig corresponds to a different cluster than the previous one, you need to make sure that kubeconfig is not from another cluster")
}
return admission.Allowed("")
return nil, nil
}
func (v *Webhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}

View File

@@ -1,65 +1,132 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"os"
"context"
"errors"
"time"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/storage/driver"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/utils/helm"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"kubesphere.io/kubesphere/pkg/config"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/utils/hashutil"
)
func buildKubeconfigFromRestConfig(config *rest.Config) ([]byte, error) {
apiConfig := api.NewConfig()
const releaseName = "ks-core"
apiCluster := &api.Cluster{
Server: config.Host,
CertificateAuthorityData: config.CAData,
func configChanged(cluster *clusterv1alpha1.Cluster) bool {
return hashutil.FNVString(cluster.Spec.Config) != cluster.Annotations[constants.ConfigHashAnnotation]
}
func setConfigHash(cluster *clusterv1alpha1.Cluster) {
configHash := hashutil.FNVString(cluster.Spec.Config)
if cluster.Annotations == nil {
cluster.Annotations = map[string]string{
constants.ConfigHashAnnotation: configHash,
}
} else {
cluster.Annotations[constants.ConfigHashAnnotation] = configHash
}
}
func installKSCoreInMemberCluster(kubeConfig []byte, jwtSecret, chartPath string, chartConfig []byte) error {
helmConf, err := helm.InitHelmConf(kubeConfig, constants.KubeSphereNamespace)
if err != nil {
return err
}
// generated kubeconfig will be used by cluster federation, CAFile is not
// accepted by kubefed, so we need read CAFile
if len(apiCluster.CertificateAuthorityData) == 0 && len(config.CAFile) != 0 {
caData, err := os.ReadFile(config.CAFile)
if err != nil {
return nil, err
if chartPath == "" {
chartPath = "/var/helm-charts/ks-core"
}
chart, err := loader.Load(chartPath) // in-container chart path
if err != nil {
return err
}
// values example:
// map[string]interface{}{
// "nestedKey": map[string]interface{}{
// "simpleKey": "simpleValue",
// },
// }
values := make(map[string]interface{})
if chartConfig != nil {
if err = yaml.Unmarshal(chartConfig, &values); err != nil {
return err
}
}
// Override some necessary values
values["role"] = "member"
// disable upgrade to prevent execution of ks-upgrade
values["upgrade"] = map[string]interface{}{
"enabled": false,
}
if err = unstructured.SetNestedField(values, jwtSecret, "authentication", "issuer", "jwtSecret"); err != nil {
return err
}
helmStatus := action.NewStatus(helmConf)
if _, err = helmStatus.Run(releaseName); err != nil {
if !errors.Is(err, driver.ErrReleaseNotFound) {
return err
}
apiCluster.CertificateAuthorityData = caData
// the release not exists
install := action.NewInstall(helmConf)
install.Namespace = constants.KubeSphereNamespace
install.CreateNamespace = true
install.Wait = true
install.ReleaseName = releaseName
install.Timeout = time.Minute * 5
if _, err = install.Run(chart, values); err != nil {
return err
}
return nil
}
apiConfig.Clusters["kubernetes"] = apiCluster
apiConfig.AuthInfos["kubernetes-admin"] = &api.AuthInfo{
ClientCertificateData: config.CertData,
ClientKeyData: config.KeyData,
Token: config.BearerToken,
TokenFile: config.BearerTokenFile,
Username: config.Username,
Password: config.Password,
upgrade := action.NewUpgrade(helmConf)
upgrade.Namespace = constants.KubeSphereNamespace
upgrade.Install = true
upgrade.Wait = true
upgrade.Timeout = time.Minute * 5
if _, err = upgrade.Run(releaseName, chart, values); err != nil {
return err
}
apiConfig.Contexts["kubernetes-admin@kubernetes"] = &api.Context{
Cluster: "kubernetes",
AuthInfo: "kubernetes-admin",
}
apiConfig.CurrentContext = "kubernetes-admin@kubernetes"
return clientcmd.Write(*apiConfig)
return nil
}
// getKubeSphereConfig reads the KubeSphere configuration from its well-known
// ConfigMap and parses it into a config.Config.
func getKubeSphereConfig(ctx context.Context, client runtimeclient.Client) (*config.Config, error) {
	key := types.NamespacedName{
		Name:      constants.KubeSphereConfigName,
		Namespace: constants.KubeSphereNamespace,
	}
	cm := &corev1.ConfigMap{}
	if err := client.Get(ctx, key, cm); err != nil {
		return nil, err
	}
	return config.FromConfigMap(cm)
}
// hasCondition reports whether the condition list contains a condition of the
// given type whose status is True.
func hasCondition(conditions []clusterv1alpha1.ClusterCondition, conditionsType clusterv1alpha1.ClusterConditionType) bool {
	for i := range conditions {
		c := &conditions[i]
		if c.Type == conditionsType && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

View File

@@ -1,854 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"fmt"
"reflect"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
kubeclient "k8s.io/client-go/kubernetes"
k8sscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
fedapis "sigs.k8s.io/kubefed/pkg/apis"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
"sigs.k8s.io/kubefed/pkg/kubefedctl/util"
"kubesphere.io/api/types/v1beta1"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1"
)
var (
	// Policy rules allowing full access to resources in the cluster
	// or namespace.
	namespacedPolicyRules = []rbacv1.PolicyRule{
		{
			Verbs:     []string{rbacv1.VerbAll},
			APIGroups: []string{rbacv1.APIGroupAll},
			Resources: []string{rbacv1.ResourceAll},
		},
	}
	// clusterPolicyRules extends the namespaced rules with read access to
	// non-resource URLs (e.g. /healthz), needed for cluster-scoped bindings.
	clusterPolicyRules = []rbacv1.PolicyRule{
		namespacedPolicyRules[0],
		{
			NonResourceURLs: []string{rbacv1.NonResourceAll},
			Verbs:           []string{"get"},
		},
	}
	// localSchemeBuilder aggregates the scheme registrations needed by the
	// kubefed client built in joinClusterForNamespace.
	localSchemeBuilder = runtime.SchemeBuilder{
		fedapis.AddToScheme,
		k8sscheme.AddToScheme,
		v1beta1.AddToScheme,
	}
)

const (
	// tokenKey is the key under which the SA token is stored in the secret.
	tokenKey = "token"
	// serviceAccountSecretTimeout bounds how long we wait for the service
	// account token secret to be populated.
	serviceAccountSecretTimeout = 30 * time.Second
	// kubefedManagedSelector selects objects managed by kubefed.
	kubefedManagedSelector = "kubefed.io/managed=true"
)
// joinClusterForNamespace registers a cluster with a KubeFed control
// plane. The KubeFed namespace in the joining cluster is provided by
// the joiningNamespace parameter. The sequence is: preflight checks,
// create the kubefed namespace in the joining cluster, create an
// authorized service account there, copy its credentials into a secret
// in the host cluster, then create the KubeFedCluster resource that
// ties them together. With dryRun set, each step only constructs objects
// without persisting them.
func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace,
	joiningNamespace, hostClusterName, joiningClusterName, secretName string, labels map[string]string,
	scope apiextv1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
	hostClientset, err := HostClientset(hostConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
		return nil, err
	}
	clusterClientset, err := ClusterClientset(clusterConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get joining cluster clientset: %v", err)
		return nil, err
	}
	scheme := runtime.NewScheme()
	// NOTE(review): the error from AddToScheme is ignored here; a scheme
	// registration failure would surface later as confusing client errors.
	localSchemeBuilder.AddToScheme(scheme)
	// NOTE(review): this local variable shadows the imported `client`
	// package for the rest of the function.
	client, err := client.New(hostConfig, client.Options{Scheme: scheme})
	if err != nil {
		klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
		return nil, err
	}
	klog.V(2).Infof("Performing preflight checks.")
	err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting)
	if err != nil {
		return nil, err
	}
	klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace)
	_, err = createKubeFedNamespace(clusterClientset, joiningNamespace, joiningClusterName, dryRun)
	if err != nil {
		klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", joiningNamespace, err)
		return nil, err
	}
	klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace)
	// Create the service account (and its RBAC) that the host cluster will
	// use to access the joining cluster's API server.
	saName, err := createAuthorizedServiceAccount(clusterClientset, joiningNamespace, joiningClusterName, hostClusterName, scope, dryRun, errorOnExisting)
	if err != nil {
		return nil, err
	}
	// Copy the service account's credentials into a secret in the host cluster.
	secret, _, err := populateSecretInHostCluster(clusterClientset, hostClientset,
		saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun)
	if err != nil {
		klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err)
		return nil, err
	}
	// Propagate the insecure-TLS setting of the joining cluster's rest config.
	var disabledTLSValidations []fedv1b1.TLSValidation
	if clusterConfig.TLSClientConfig.Insecure {
		disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll)
	}
	kubefedCluster, err := createKubeFedCluster(clusterConfig, client, joiningClusterName, clusterConfig.Host,
		secret.Name, kubefedNamespace, clusterConfig.CAData, disabledTLSValidations, labels, dryRun, errorOnExisting)
	if err != nil {
		klog.V(2).Infof("Failed to create federated cluster resource: %v", err)
		return nil, err
	}
	klog.V(2).Info("Created federated cluster resource")
	return kubefedCluster, nil
}
// performPreflightChecks checks that the host and joining clusters are in
// a consistent state before joining: the joining cluster must not already
// contain the service account that would be created for this join (unless
// errorOnExisting is false, in which case an existing account is tolerated).
func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName,
	kubefedNamespace string, errorOnExisting bool) error {
	// Make sure there is no existing service account in the joining cluster.
	saName := util.ClusterServiceAccountName(name, hostClusterName)
	_, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(context.Background(), saName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	if errorOnExisting {
		return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name)
	}
	klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name)
	return nil
}
// createKubeFedCluster creates a federated cluster resource that associates
// the cluster and secret. If the resource already exists it is updated in
// place (spec and labels) unless errorOnExisting is set; with dryRun the
// object is only constructed, never persisted.
func createKubeFedCluster(clusterConfig *rest.Config, client client.Client, joiningClusterName, apiEndpoint,
	secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation,
	labels map[string]string, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
	fedCluster := &fedv1b1.KubeFedCluster{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: kubefedNamespace,
			Name:      joiningClusterName,
			Labels:    labels,
		},
		Spec: fedv1b1.KubeFedClusterSpec{
			APIEndpoint: apiEndpoint,
			CABundle:    caBundle,
			SecretRef: fedv1b1.LocalSecretReference{
				Name: secretName,
			},
			DisabledTLSValidations: disabledTLSValidations,
		},
	}
	if dryRun {
		return fedCluster, nil
	}
	existingFedCluster := &fedv1b1.KubeFedCluster{}
	key := types.NamespacedName{Namespace: kubefedNamespace, Name: joiningClusterName}
	err := client.Get(context.TODO(), key, existingFedCluster)
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err)
		return nil, err
	case err == nil && errorOnExisting:
		return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName)
	case err == nil:
		// The resource exists: update spec and labels, retrying on conflict.
		if retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			if err = client.Get(context.TODO(), key, existingFedCluster); err != nil {
				return err
			}
			existingFedCluster.Spec = fedCluster.Spec
			existingFedCluster.Labels = labels
			return client.Update(context.TODO(), existingFedCluster)
		}); retryErr != nil {
			// BUGFIX: log and return retryErr. The previous code returned the
			// captured `err`, which stays nil when the Update call (whose
			// result is not assigned to err) fails — silently swallowing the
			// failure and returning (nil, nil).
			klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, retryErr)
			return nil, retryErr
		}
		return existingFedCluster, nil
	default:
		// Not found: validate and create a fresh resource.
		if err = checkWorkspaces(clusterConfig, client, fedCluster); err != nil {
			klog.V(2).Infof("Validate federated cluster %s failed due to %v", fedCluster.Name, err)
			return nil, err
		}
		if err = client.Create(context.TODO(), fedCluster); err != nil {
			klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err)
			return nil, err
		}
		return fedCluster, nil
	}
}
// createKubeFedNamespace creates the kubefed namespace in the cluster
// associated with clusterClientset, if it doesn't already exist.
// With dryRun the namespace object is only constructed, never persisted.
func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace,
	joiningClusterName string, dryRun bool) (*corev1.Namespace, error) {
	fedNamespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: kubefedNamespace,
		},
	}
	if dryRun {
		return fedNamespace, nil
	}
	_, getErr := clusterClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
	switch {
	case getErr == nil:
		klog.V(2).Infof("Already existing %s namespace", kubefedNamespace)
		return fedNamespace, nil
	case !apierrors.IsNotFound(getErr):
		klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, getErr)
		return nil, getErr
	}
	// Not found, so create. A concurrent creation (AlreadyExists) is fine.
	if _, createErr := clusterClientset.CoreV1().Namespaces().Create(context.Background(), fedNamespace, metav1.CreateOptions{}); createErr != nil && !apierrors.IsAlreadyExists(createErr) {
		klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, createErr)
		return nil, createErr
	}
	return fedNamespace, nil
}
// createAuthorizedServiceAccount creates a service account and grants
// the privileges required by the KubeFed control plane to manage
// resources in the joining cluster. The name of the created service
// account is returned on success. For NamespaceScoped federation the
// account gets a namespaced Role plus a health-check ClusterRole;
// otherwise it gets a full ClusterRole.
func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface,
	namespace, joiningClusterName, hostClusterName string,
	scope apiextv1.ResourceScope, dryRun, errorOnExisting bool) (string, error) {
	klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName)
	saName, err := createServiceAccountWithSecret(joiningClusterClientset, namespace,
		joiningClusterName, hostClusterName, dryRun, errorOnExisting)
	if err != nil {
		klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v",
			saName, joiningClusterName, err)
		return "", err
	}
	klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName)
	if scope == apiextv1.NamespaceScoped {
		// Namespace-scoped federation: full access within the namespace only,
		// plus a cluster role restricted to the health-check endpoint.
		klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
		err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
		if err != nil {
			klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err)
			return "", err
		}
		klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s",
			saName, joiningClusterName)
		klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
		err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName,
			dryRun, errorOnExisting)
		if err != nil {
			klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v",
				saName, joiningClusterName, err)
			return "", err
		}
		klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s",
			saName, joiningClusterName)
	} else {
		// Cluster-scoped federation: full access to the whole cluster.
		klog.V(2).Infof("Creating cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
		err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
		if err != nil {
			klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v",
				saName, joiningClusterName, err)
			return "", err
		}
		klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s",
			saName, joiningClusterName)
	}
	return saName, nil
}
// createServiceAccountWithSecret creates a service account and secret in the cluster associated
// with clusterClientset with credentials that will be used by the host cluster
// to access its API server. With dryRun only the derived name is returned.
func createServiceAccountWithSecret(clusterClientset kubeclient.Interface, namespace,
	joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) {
	saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName)
	if dryRun {
		return saName, nil
	}
	ctx := context.Background()
	sa, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Get(ctx, saName, metav1.GetOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return "", err
		}
		sa = &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:      saName,
				Namespace: namespace,
			},
		}
		// We must create the sa first, then create the associated secret, and update the sa at last.
		// Or the kube-controller-manager will delete the secret.
		sa, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{})
		switch {
		case apierrors.IsAlreadyExists(err) && errorOnExisting:
			klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
			return "", err
		case err != nil && !apierrors.IsAlreadyExists(err):
			klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
			return "", err
		case apierrors.IsAlreadyExists(err):
			// BUGFIX: the account was created concurrently. Create returned a
			// nil object alongside the AlreadyExists error, so the original
			// code dereferenced a nil sa below. Re-fetch the existing account.
			if sa, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Get(ctx, saName, metav1.GetOptions{}); err != nil {
				return "", err
			}
		}
	}
	// Nothing more to do if a token secret is already attached.
	if len(sa.Secrets) > 0 {
		return saName, nil
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-token-", saName),
			Namespace:    namespace,
			Annotations: map[string]string{
				corev1.ServiceAccountNameKey: saName,
			},
		},
		Type: corev1.SecretTypeServiceAccountToken,
	}
	// After kubernetes v1.24, kube-controller-manager will not create the default secret for
	// service account. http://kep.k8s.io/2800
	// Create a default secret.
	secret, err = clusterClientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		klog.V(2).Infof("Could not create secret for service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
		return "", err
	}
	// At last, update the service account to reference the secret.
	sa.Secrets = append(sa.Secrets, corev1.ObjectReference{Name: secret.Name})
	if _, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Update(ctx, sa, metav1.UpdateOptions{}); err != nil {
		klog.Infof("Could not update service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
		return "", err
	}
	return saName, nil
}
// bindingSubjects returns the single-subject list referencing the given
// service account, for use in Role/ClusterRole bindings.
func bindingSubjects(saName, namespace string) []rbacv1.Subject {
	subject := rbacv1.Subject{
		Kind:      rbacv1.ServiceAccountKind,
		Name:      saName,
		Namespace: namespace,
	}
	return []rbacv1.Subject{subject}
}
// createClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access all resources in all namespaces in the cluster associated
// with clientset. Existing role/binding objects are updated in place
// unless errorOnExisting is set; with dryRun nothing is persisted.
func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
	if dryRun {
		return nil
	}
	roleName := util.RoleName(saName)
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Rules: clusterPolicyRules,
	}
	// Create or update the cluster role.
	existingRole, err := clientset.RbacV1().ClusterRoles().Get(context.Background(), roleName, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v",
			saName, clusterName, err)
		return err
	case err == nil && errorOnExisting:
		return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// Role exists and overwriting is allowed: refresh its rules.
		existingRole.Rules = role.Rules
		_, err := clientset.RbacV1().ClusterRoles().Update(context.Background(), existingRole, metav1.UpdateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	default: // role was not found
		_, err := clientset.RbacV1().ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	// TODO: This should limit its access to only necessary resources.
	binding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Subjects: bindingSubjects(saName, namespace),
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     roleName,
		},
	}
	// Create or update the cluster role binding.
	existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(context.Background(), binding.Name, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v",
			saName, clusterName, err)
		return err
	case err == nil && errorOnExisting:
		return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
		// must be deleted and recreated with the correct roleRef
		if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
			err = clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
			if err != nil {
				klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v",
					saName, clusterName, err)
				return err
			}
			_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
					saName, clusterName, err)
				return err
			}
		} else {
			// Same roleRef: just refresh the subjects.
			existingBinding.Subjects = binding.Subjects
			_, err := clientset.RbacV1().ClusterRoleBindings().Update(context.Background(), existingBinding, metav1.UpdateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v",
					saName, clusterName, err)
				return err
			}
		}
	default:
		// Binding not found: create it.
		_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	return nil
}
// createRoleAndBinding creates an RBAC role and binding
// that allows the service account identified by saName to access all
// resources in the specified namespace. Existing role/binding objects are
// updated in place unless errorOnExisting is set; with dryRun nothing is
// persisted.
func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
	if dryRun {
		return nil
	}
	roleName := util.RoleName(saName)
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Rules: namespacedPolicyRules,
	}
	// Create or update the namespaced role.
	existingRole, err := clientset.RbacV1().Roles(namespace).Get(context.Background(), roleName, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err)
		return err
	case errorOnExisting && err == nil:
		return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// Role exists and overwriting is allowed: refresh its rules.
		existingRole.Rules = role.Rules
		_, err = clientset.RbacV1().Roles(namespace).Update(context.Background(), existingRole, metav1.UpdateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	default:
		// Role not found: create it.
		_, err := clientset.RbacV1().Roles(namespace).Create(context.Background(), role, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Subjects: bindingSubjects(saName, namespace),
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "Role",
			Name:     roleName,
		},
	}
	// Create or update the role binding.
	existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(context.Background(), binding.Name, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v",
			saName, clusterName, err)
		return err
	case err == nil && errorOnExisting:
		return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
		// must be deleted and recreated with the correct roleRef
		if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
			err = clientset.RbacV1().RoleBindings(namespace).Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
			if err != nil {
				klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v",
					saName, clusterName, err)
				return err
			}
			_, err = clientset.RbacV1().RoleBindings(namespace).Create(context.Background(), binding, metav1.CreateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
					saName, clusterName, err)
				return err
			}
		} else {
			// Same roleRef: just refresh the subjects.
			existingBinding.Subjects = binding.Subjects
			_, err = clientset.RbacV1().RoleBindings(namespace).Update(context.Background(), existingBinding, metav1.UpdateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v",
					saName, clusterName, err)
				return err
			}
		}
	default:
		// Binding not found: create it.
		_, err = clientset.RbacV1().RoleBindings(namespace).Create(context.Background(), binding, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	return nil
}
// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access the health check path of the cluster.
// With dryRun set nothing is written; with errorOnExisting set, pre-existing
// role/binding objects are treated as errors instead of being updated.
func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
	if dryRun {
		return nil
	}
	roleName := util.HealthCheckRoleName(saName, namespace)
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Rules: []rbacv1.PolicyRule{
			{
				// NOTE(review): RBAC verb matching is case-sensitive and non-resource
				// requests use the lowercased HTTP method ("get"); "Get" is kept
				// verbatim from upstream kubefed — confirm it actually matches /healthz.
				Verbs:           []string{"Get"},
				NonResourceURLs: []string{"/healthz"},
			},
			// The cluster client expects to be able to list nodes to retrieve zone and region details.
			// TODO(marun) Consider making zone/region retrieval optional
			{
				Verbs:     []string{"list"},
				APIGroups: []string{""},
				Resources: []string{"nodes"},
			},
		},
	}
	// Create-or-update the cluster role.
	existingRole, err := clientset.RbacV1().ClusterRoles().Get(context.Background(), role.Name, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v",
			saName, clusterName, err)
		return err
	case err == nil && errorOnExisting:
		return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// Role exists and updates are allowed: reconcile only the rules.
		existingRole.Rules = role.Rules
		_, err := clientset.RbacV1().ClusterRoles().Update(context.Background(), existingRole, metav1.UpdateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	default: // role was not found
		_, err := clientset.RbacV1().ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	// Bind the cluster role to the service account.
	binding := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: roleName,
		},
		Subjects: bindingSubjects(saName, namespace),
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     roleName,
		},
	}
	existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(context.Background(), binding.Name, metav1.GetOptions{})
	switch {
	case err != nil && !apierrors.IsNotFound(err):
		klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v",
			saName, clusterName, err)
		return err
	case err == nil && errorOnExisting:
		return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
	case err == nil:
		// The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
		// must be deleted and recreated with the correct roleRef
		if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
			err = clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), existingBinding.Name, metav1.DeleteOptions{})
			if err != nil {
				klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v",
					saName, clusterName, err)
				return err
			}
			_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
					saName, clusterName, err)
				return err
			}
		} else {
			// Same roleRef: reconcile subjects in place.
			existingBinding.Subjects = binding.Subjects
			_, err := clientset.RbacV1().ClusterRoleBindings().Update(context.Background(), existingBinding, metav1.UpdateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
					saName, clusterName, err)
				return err
			}
		}
	default:
		_, err = clientset.RbacV1().ClusterRoleBindings().Create(context.Background(), binding, metav1.CreateOptions{})
		if err != nil {
			klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
				saName, clusterName, err)
			return err
		}
	}
	return nil
}
// populateSecretInHostCluster copies the service account secret for saName
// from the cluster referenced by clusterClientset to the client referenced by
// hostClientset, putting it in a secret named secretName in the provided
// namespace. It returns the host-side secret and the joining cluster's CA
// bundle (nil when the source secret has no "ca.crt" key or on the
// create-new path).
func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface,
	saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string,
	dryRun bool) (*corev1.Secret, []byte, error) {
	klog.V(2).Infof("Creating cluster credentials secret in host cluster")
	if dryRun {
		// Return a stub carrying only the name; nothing is written.
		dryRunSecret := &corev1.Secret{}
		dryRunSecret.Name = secretName
		return dryRunSecret, nil, nil
	}
	// Get the secret from the joining cluster.
	// Poll until the service account has a ServiceAccountToken secret attached
	// (the token controller populates it asynchronously), up to
	// serviceAccountSecretTimeout. Intermediate failures are retried.
	var secret *corev1.Secret
	err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) {
		sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(context.Background(), saName,
			metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		for _, objReference := range sa.Secrets {
			saSecretName := objReference.Name
			var err error
			secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(context.Background(), saSecretName, metav1.GetOptions{})
			if err != nil {
				return false, nil
			}
			if secret.Type == corev1.SecretTypeServiceAccountToken {
				klog.V(2).Infof("Using secret named: %s", secret.Name)
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err)
		return nil, nil, err
	}
	token, ok := secret.Data[tokenKey]
	if !ok {
		return nil, nil, errors.Errorf("Key %q not found in service account secret", tokenKey)
	}
	// Create a secret in the host cluster containing the token.
	v1Secret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: hostNamespace,
		},
		Data: map[string][]byte{
			tokenKey: token,
		},
	}
	if secretName == "" {
		v1Secret.GenerateName = joiningClusterName + "-"
	} else {
		v1Secret.Name = secretName
	}
	var v1SecretResult *corev1.Secret
	// NOTE(review): when secretName is empty the Get below is called with an
	// empty name; presumably that error is not NotFound and aborts here —
	// verify the GenerateName path is actually reachable.
	_, err = hostClientset.CoreV1().Secrets(hostNamespace).Get(context.Background(), v1Secret.Name, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Create(context.Background(), &v1Secret, metav1.CreateOptions{})
			if err != nil {
				klog.V(2).Infof("Could not create secret in host cluster: %v", err)
				return nil, nil, err
			}
			// Freshly created: the CA bundle is not returned on this path.
			return v1SecretResult, nil, nil
		}
		klog.V(2).Infof("Could not get secret %s in host cluster: %v", v1Secret.Name, err)
		return nil, nil, err
	} else {
		v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Update(context.Background(), &v1Secret, metav1.UpdateOptions{})
		if err != nil {
			klog.V(2).Infof("Update secret %s in host cluster failed: %v", v1Secret.Name, err)
			return nil, nil, err
		}
	}
	// caBundle is optional so no error is suggested if it is not
	// found in the secret.
	caBundle := secret.Data["ca.crt"]
	klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name)
	return v1SecretResult, caBundle, nil
}
// checkWorkspaces rejects a join when a kubefed-managed workspace on the
// member cluster conflicts with the host: every managed workspace found on
// the member must be included in the placement of the FederatedWorkspace of
// the same name on the host.
func checkWorkspaces(clusterConfig *rest.Config, hostClient client.Client, cluster *fedv1b1.KubeFedCluster) error {
	tc, err := v1alpha1.NewForConfig(clusterConfig)
	if err != nil {
		return err
	}
	wsList, err := tc.Workspaces().List(context.TODO(), metav1.ListOptions{LabelSelector: kubefedManagedSelector})
	if err != nil {
		return err
	}
	// Workspaces with the `kubefed.io/managed: true` label will be deleted if the FederatedWorkspace's Clusters don't include the cluster.
	// The user needs to remove the label or delete the workspace manually.
	for i := range wsList.Items {
		name := wsList.Items[i].Name
		fedWS := &v1beta1.FederatedWorkspace{}
		if err := hostClient.Get(context.TODO(), types.NamespacedName{Name: name}, fedWS); err != nil {
			// Continue to check next workspace, when it's not exist in the host.
			if apierrors.IsNotFound(err) {
				continue
			}
			return err
		}
		if !containsCluster(fedWS.Spec.Placement, cluster.Name) {
			return errors.Errorf("The workspace %s is found in the target member cluster %s, which is conflict with the workspace on host", name, cluster.Name)
		}
	}
	return nil
}
// containsCluster reports whether str is one of the explicitly listed
// clusters in the placement. A nil cluster list means "match via selector",
// which this check treats as a match.
func containsCluster(placement v1beta1.GenericPlacementFields, str string) bool {
	// Use selector if clusters are nil. But we ignore selector here.
	if placement.Clusters == nil {
		return true
	}
	for i := range placement.Clusters {
		if placement.Clusters[i].Name == str {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,294 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package cluster
import (
"bytes"
"context"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"time"
certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
"kubesphere.io/kubesphere/pkg/utils/pkiutil"
)
// updateKubeConfigExpirationDateCondition inspects the client certificate in
// the cluster's kubeconfig and maintains the KubeConfigCertExpiresInSevenDays
// condition, triggering a kubeconfig renewal when seven days or less remain.
func (r *Reconciler) updateKubeConfigExpirationDateCondition(
	ctx context.Context, cluster *clusterv1alpha1.Cluster, clusterClient client.Client, config *rest.Config,
) error {
	// we don't need to check member clusters which using proxy mode, their certs are managed and will be renewed by tower.
	if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
		return nil
	}
	klog.V(4).Infof("sync KubeConfig expiration date for cluster %s", cluster.Name)
	cert, err := parseKubeConfigCert(config)
	if err != nil {
		return fmt.Errorf("parseKubeConfigCert for cluster %s failed: %v", cluster.Name, err)
	}
	if cert == nil || cert.NotAfter.IsZero() {
		// No client certificate (e.g. token-based kubeconfig): nothing to track.
		// delete the KubeConfigCertExpiresInSevenDays condition if it has
		conditions := make([]clusterv1alpha1.ClusterCondition, 0)
		for _, condition := range cluster.Status.Conditions {
			if condition.Type == clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays {
				continue
			}
			conditions = append(conditions, condition)
		}
		cluster.Status.Conditions = conditions
		return nil
	}
	// Renew when the certificate expires within seven days.
	seconds := time.Until(cert.NotAfter).Seconds()
	if seconds/86400 <= 7 {
		if err = r.renewKubeConfig(ctx, cluster, clusterClient, config, cert); err != nil {
			return err
		}
	}
	// NOTE(review): the condition message records the pre-renewal NotAfter even
	// when a renewal just happened above — confirm that is intended.
	r.updateClusterCondition(cluster, clusterv1alpha1.ClusterCondition{
		Type:               clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays,
		LastUpdateTime:     metav1.Now(),
		LastTransitionTime: metav1.Now(),
		Reason:             string(clusterv1alpha1.ClusterKubeConfigCertExpiresInSevenDays),
		Message:            cert.NotAfter.String(),
	})
	return nil
}
// parseKubeConfigCert extracts the client certificate carried in the given
// rest.Config. It returns (nil, nil) when the config holds no cert data.
func parseKubeConfigCert(config *rest.Config) (*x509.Certificate, error) {
	data := config.CertData
	if data == nil {
		return nil, nil
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("pem.Decode failed, got empty block data")
	}
	return x509.ParseCertificate(block.Bytes)
}
// renewKubeConfig replaces cluster.Spec.Connection.KubeConfig with fresh
// credentials. Token-based kubeconfigs are left untouched; system:masters
// certificates fall back to a kubesphere ServiceAccount token (they cannot be
// re-issued via CSR); other users get a newly signed client certificate.
func (r *Reconciler) renewKubeConfig(
	ctx context.Context, cluster *clusterv1alpha1.Cluster, clusterClient client.Client, config *rest.Config, cert *x509.Certificate,
) error {
	apiConfig, err := clientcmd.Load(cluster.Spec.Connection.KubeConfig)
	if err != nil {
		return err
	}
	currentContext := apiConfig.Contexts[apiConfig.CurrentContext]
	username := currentContext.AuthInfo
	authInfo := apiConfig.AuthInfos[username]
	// Token auth carries no certificate to renew.
	if authInfo.Token != "" {
		return nil
	}
	for _, v := range cert.Subject.Organization {
		// we cannot update the certificate of the system:masters group and will use the certificate of the admin user directly
		// certificatesigningrequests.certificates.k8s.io is forbidden:
		// use of kubernetes.io/kube-apiserver-client signer with system:masters group is not allowed
		//
		// for cases where we can't issue a certificate, we use the token of the kubesphere service account directly
		if v == user.SystemPrivilegedGroup {
			data, err := setKubeSphereSAToken(ctx, clusterClient, apiConfig, username)
			if err != nil {
				return err
			}
			cluster.Spec.Connection.KubeConfig = data
			return nil
		}
	}
	// Issue a new client certificate through the member cluster's CSR API.
	// (Local variable shadows the imported kubeconfig package within this scope.)
	kubeconfig, err := genKubeConfig(ctx, clusterClient, config, username)
	if err != nil {
		return err
	}
	cluster.Spec.Connection.KubeConfig = kubeconfig
	return nil
}
// setKubeSphereSAToken rewrites apiConfig's auth info for username to use the
// token of the kubesphere ServiceAccount found in the member cluster, and
// returns the serialized kubeconfig.
func setKubeSphereSAToken(
	ctx context.Context, clusterClient client.Client, apiConfig *clientcmdapi.Config, username string,
) ([]byte, error) {
	secretList := &corev1.SecretList{}
	err := clusterClient.List(ctx, secretList,
		client.InNamespace(constants.KubeSphereNamespace),
		client.MatchingLabels{"kubesphere.io/service-account-token": ""},
	)
	if err != nil {
		return nil, err
	}
	// Pick the first secret that actually carries a service account token.
	var tokenSecret *corev1.Secret
	for i := range secretList.Items {
		if secretList.Items[i].Type == corev1.SecretTypeServiceAccountToken {
			tokenSecret = &secretList.Items[i]
			break
		}
	}
	if tokenSecret == nil {
		return nil, fmt.Errorf("no kubesphere ServiceAccount secret found")
	}
	// Replace the auth infos with a single token-based entry for username.
	apiConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
		username: {
			Token: string(tokenSecret.Data["token"]),
		},
	}
	return clientcmd.Write(*apiConfig)
}
// genKubeConfig issues a client certificate for username through the member
// cluster's CSR API and assembles a complete kubeconfig around it. The
// private key is recovered from the signed CSR's annotation.
func genKubeConfig(ctx context.Context, clusterClient client.Client, clusterConfig *rest.Config, username string) ([]byte, error) {
	csrName, err := createCSR(ctx, clusterClient, username)
	if err != nil {
		return nil, err
	}
	// Wait (3s interval, 1 minute cap) for the approved CSR to be signed.
	var privateKey, clientCert []byte
	if err = wait.PollUntilContextTimeout(ctx, time.Second*3, time.Minute, false, func(ctx context.Context) (bool, error) {
		csr := &certificatesv1.CertificateSigningRequest{}
		if err = clusterClient.Get(ctx, types.NamespacedName{Name: csrName}, csr); err != nil {
			return false, err
		}
		if len(csr.Status.Certificate) == 0 {
			// Not signed yet; keep polling.
			return false, nil
		}
		privateKey = []byte(csr.Annotations[kubeconfig.PrivateKeyAnnotation])
		clientCert = csr.Status.Certificate
		return true, nil
	}); err != nil {
		return nil, err
	}
	// Prefer the CA carried by the rest.Config; fall back to the in-cluster CA file.
	var ca []byte
	if len(clusterConfig.CAData) > 0 {
		ca = clusterConfig.CAData
	} else {
		ca, err = os.ReadFile(kubeconfig.InClusterCAFilePath)
		if err != nil {
			klog.Errorf("Failed to read CA file: %v", err)
			return nil, err
		}
	}
	currentContext := fmt.Sprintf("%s@%s", username, kubeconfig.DefaultClusterName)
	config := clientcmdapi.Config{
		Kind:        "Config",
		APIVersion:  "v1",
		Preferences: clientcmdapi.Preferences{},
		Clusters: map[string]*clientcmdapi.Cluster{kubeconfig.DefaultClusterName: {
			Server:                   clusterConfig.Host,
			InsecureSkipTLSVerify:    false,
			CertificateAuthorityData: ca,
		}},
		Contexts: map[string]*clientcmdapi.Context{currentContext: {
			Cluster:   kubeconfig.DefaultClusterName,
			AuthInfo:  username,
			Namespace: kubeconfig.DefaultNamespace,
		}},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{
			username: {
				ClientKeyData:         privateKey,
				ClientCertificateData: clientCert,
			},
		},
		CurrentContext: currentContext,
	}
	return clientcmd.Write(config)
}
// createCSR generates an RSA key and certificate request for username, stores
// the PEM-encoded private key in an annotation on a Kubernetes CSR object,
// creates the CSR against the kube-apiserver-client signer, and approves it.
// It returns the CSR's name. Note: the CSR object is not cleaned up here.
func createCSR(ctx context.Context, clusterClient client.Client, username string) (string, error) {
	x509csr, x509key, err := pkiutil.NewCSRAndKey(&certutil.Config{
		CommonName:   username,
		Organization: nil,
		AltNames:     certutil.AltNames{},
		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	if err != nil {
		klog.Errorf("Failed to create CSR and key for user %s: %v", username, err)
		return "", err
	}
	var csrBuffer, keyBuffer bytes.Buffer
	// PEM-encode the private key so it can travel in the CSR annotation and be
	// recovered once the certificate is issued.
	if err = pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
		klog.Errorf("Failed to encode private key for user %s: %v", username, err)
		return "", err
	}
	var csrBytes []byte
	if csrBytes, err = x509.CreateCertificateRequest(rand.Reader, x509csr, x509key); err != nil {
		klog.Errorf("Failed to create CSR for user %s: %v", username, err)
		return "", err
	}
	if err = pem.Encode(&csrBuffer, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}); err != nil {
		klog.Errorf("Failed to encode CSR for user %s: %v", username, err)
		return "", err
	}
	csr := csrBuffer.Bytes()
	key := keyBuffer.Bytes()
	// Unix-timestamp suffix keeps repeated renewals from colliding on name.
	csrName := fmt.Sprintf("%s-csr-%d", username, time.Now().Unix())
	k8sCSR := &certificatesv1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{
			Name:        csrName,
			Annotations: map[string]string{kubeconfig.PrivateKeyAnnotation: string(key)},
		},
		Spec: certificatesv1.CertificateSigningRequestSpec{
			Request:    csr,
			SignerName: certificatesv1.KubeAPIServerClientSignerName,
			Usages:     []certificatesv1.KeyUsage{certificatesv1.UsageKeyEncipherment, certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature},
			Username:   username,
			Groups:     []string{user.AllAuthenticated},
		},
	}
	if err = clusterClient.Create(ctx, k8sCSR); err != nil {
		klog.Errorf("Failed to create CSR for user %s: %v", username, err)
		return "", err
	}
	return approveCSR(ctx, clusterClient, k8sCSR)
}
// approveCSR marks the given CSR as approved via the "approval" subresource
// and returns the CSR's name.
func approveCSR(ctx context.Context, clusterClient client.Client, csr *certificatesv1.CertificateSigningRequest) (string, error) {
	approved := certificatesv1.CertificateSigningRequestCondition{
		Status:  corev1.ConditionTrue,
		Type:    certificatesv1.CertificateApproved,
		Reason:  "KubeSphereApprove",
		Message: "This CSR was approved by KubeSphere",
		LastUpdateTime: metav1.Time{
			Time: time.Now(),
		},
	}
	csr.Status = certificatesv1.CertificateSigningRequestStatus{
		Conditions: []certificatesv1.CertificateSigningRequestCondition{approved},
	}
	err := clusterClient.SubResource("approval").Update(ctx, csr, &client.SubResourceUpdateOptions{SubResourceBody: csr})
	if err != nil {
		return "", err
	}
	return csr.Name, nil
}

View File

@@ -0,0 +1,46 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package predicate
import (
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"kubesphere.io/kubesphere/pkg/controller/cluster/utils"
)
// ClusterStatusChangedPredicate filters Cluster events, only passing updates
// where a cluster transitions to ready or schedulable; all other event kinds
// are suppressed.
type ClusterStatusChangedPredicate struct {
	predicate.Funcs
}
// Update passes an update event only when the cluster became ready or became
// schedulable across the old→new transition.
func (ClusterStatusChangedPredicate) Update(e event.UpdateEvent) bool {
	oldCluster, ok := e.ObjectOld.(*clusterv1alpha1.Cluster)
	if !ok {
		return false
	}
	// Use a checked assertion here too: the original asserted ObjectNew
	// unconditionally, which would panic on an unexpected object type while
	// ObjectOld was already defensively checked.
	newCluster, ok := e.ObjectNew.(*clusterv1alpha1.Cluster)
	if !ok {
		return false
	}
	// cluster is ready
	if !utils.IsClusterReady(oldCluster) && utils.IsClusterReady(newCluster) {
		return true
	}
	// cluster became schedulable
	if !utils.IsClusterSchedulable(oldCluster) && utils.IsClusterSchedulable(newCluster) {
		return true
	}
	return false
}
// Create suppresses all create events; only status transitions matter.
func (ClusterStatusChangedPredicate) Create(_ event.CreateEvent) bool {
	return false
}
// Delete suppresses all delete events.
func (ClusterStatusChangedPredicate) Delete(_ event.DeleteEvent) bool {
	return false
}
// Generic suppresses all generic events.
func (ClusterStatusChangedPredicate) Generic(_ event.GenericEvent) bool {
	return false
}

View File

@@ -1,313 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"context"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
"sigs.k8s.io/kubefed/pkg/kubefedctl/util"
)
// Following code copied from sigs.k8s.io/kubefed to avoid import collision
// UnjoinCluster performs all the necessary steps to remove the
// registration of a cluster from a KubeFed control plane provided the
// required set of parameters are passed in.
// With forceDeletion set, member-side failures are logged and cleanup
// continues; dryRun threads through to the individual delete helpers.
func unjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, unjoiningClusterName string, forceDeletion, dryRun bool, skipMemberClusterResources bool) error {
	hostClientset, err := util.HostClientset(hostConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
		return err
	}
	// clusterConfig may be nil (member unreachable); with forceDeletion set,
	// clientset errors are tolerated and member-side cleanup is skipped below.
	var clusterClientset *kubeclient.Clientset
	if clusterConfig != nil {
		clusterClientset, err = util.ClusterClientset(clusterConfig)
		if err != nil {
			klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err)
			if !forceDeletion {
				return err
			}
		}
	}
	client, err := genericclient.New(hostConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
		return err
	}
	// Member-side cleanup: RBAC resources and the kubefed namespace.
	if clusterClientset != nil && !skipMemberClusterResources {
		err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun)
		if err != nil {
			if !forceDeletion {
				return err
			}
			klog.V(2).Infof("Failed to delete RBAC resources: %v", err)
		}
		err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun)
		if err != nil {
			if !forceDeletion {
				return err
			}
			klog.V(2).Infof("Failed to delete kubefed namespace: %v", err)
		}
	}
	// deletionSucceeded when all operations in deleteRBACResources and deleteFedNSFromUnjoinCluster succeed.
	err = deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun)
	if err != nil {
		return err
	}
	return nil
}
// deleteFederatedClusterAndSecret deletes the KubeFedCluster resource for the
// unjoining cluster along with the credentials secret it references.
// With forceDeletion set, secret/cluster delete failures are logged instead
// of aborting.
func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client,
	kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
	if dryRun {
		return nil
	}
	klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q",
		kubefedNamespace, unjoiningClusterName)
	fedCluster := &fedv1b1.KubeFedCluster{}
	err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName)
	if err != nil {
		// Already gone: nothing to clean up.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName)
	}
	// Delete the credentials secret referenced by the KubeFedCluster first.
	err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(context.Background(), fedCluster.Spec.SecretRef.Name,
		metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q",
			kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		// forceDeletion: log and keep going.
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
	}
	// Then delete the KubeFedCluster object itself.
	err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name)
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
	}
	return nil
}
// deleteRBACResources deletes the cluster role, cluster rolebindings and service account
// from the unjoining cluster.
func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface,
	namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error {
	saName := ClusterServiceAccountName(unjoiningClusterName, hostClusterName)
	// Remove the roles/bindings first, then the service account they refer to.
	if err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun); err != nil {
		return err
	}
	return deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun)
}
// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from
// the unjoining cluster so long as the unjoining cluster is not the
// host cluster.
func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface,
	kubefedNamespace, unjoiningClusterName string, dryRun bool) error {
	if dryRun {
		return nil
	}
	hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace)
	}
	unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(context.Background(), kubefedNamespace, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
	}
	// Matching UIDs mean the unjoining cluster IS the host cluster:
	// never delete the control plane's own namespace.
	if IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) {
		klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace)
		return nil
	}
	klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
	err = unjoiningClusterClientset.CoreV1().Namespaces().Delete(context.Background(), kubefedNamespace, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		// Already removed: success.
		klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
		return nil
	} else if err != nil {
		return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
	} else {
		klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
	}
	return nil
}
// deleteServiceAccount deletes a service account in the cluster associated
// with clusterClientset with credentials that are used by the host cluster
// to access its API server. A missing service account is not an error.
func deleteServiceAccount(clusterClientset kubeclient.Interface, saName,
	namespace, unjoiningClusterName string, dryRun bool) error {
	if dryRun {
		return nil
	}
	klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
	// Delete a service account.
	err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(context.Background(), saName,
		metav1.DeleteOptions{})
	switch {
	case apierrors.IsNotFound(err):
		klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName)
	case err != nil:
		return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName)
	default:
		klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
	}
	return nil
}
// deleteClusterRoleAndBinding deletes an RBAC cluster role and binding that
// allows the service account identified by saName to access all resources in
// all namespaces in the cluster associated with clusterClientset.
// Missing objects are treated as success; other failures abort unless
// forceDeletion is set, in which case they are logged and cleanup continues.
func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface,
	saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
	if dryRun {
		return nil
	}
	roleName := util.RoleName(saName)
	healthCheckRoleName := util.HealthCheckRoleName(saName, namespace)
	// Attempt to delete all role and role bindings created by join
	for _, name := range []string{roleName, healthCheckRoleName} {
		klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.",
			name, saName, unjoiningClusterName)
		err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), name, metav1.DeleteOptions{})
		if apierrors.IsNotFound(err) {
			klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		} else if err != nil {
			wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q",
				name, saName, unjoiningClusterName)
			if !forceDeletion {
				return wrappedErr
			}
			klog.V(2).Infof("%v", wrappedErr)
		} else {
			klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		}
		klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.",
			name, saName, unjoiningClusterName)
		err = clusterClientset.RbacV1().ClusterRoles().Delete(context.Background(), name, metav1.DeleteOptions{})
		if apierrors.IsNotFound(err) {
			klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		} else if err != nil {
			wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q",
				name, saName, unjoiningClusterName)
			if !forceDeletion {
				return wrappedErr
			}
			klog.V(2).Infof("%v", wrappedErr)
		} else {
			klog.V(2).Infof("Deleted cluster role %q for service account %q in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		}
	}
	// Namespaced role binding and role created by join.
	klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
		namespace, roleName, saName, unjoiningClusterName)
	err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(context.Background(), roleName, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q",
			namespace, roleName, saName, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	}
	klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.",
		namespace, roleName, saName, unjoiningClusterName)
	err = clusterClientset.RbacV1().Roles(namespace).Delete(context.Background(), roleName, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q",
			namespace, roleName, saName, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		// Fixed log message: deletion succeeded at this point ("Deleted", not
		// "Deleting"), matching the other success branches above.
		klog.V(2).Infof("Deleted role \"%s/%s\" for service account %q in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	}
	return nil
}

View File

@@ -1,65 +0,0 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
kubeclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// HostClientset provides a kubernetes API compliant clientset to
// communicate with the host cluster's kubernetes API server.
func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) {
	clientset, err := kubeclient.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return clientset, nil
}
// ClusterClientset provides a kubernetes API compliant clientset to
// communicate with the joining cluster's kubernetes API server.
func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) {
	clientset, err := kubeclient.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return clientset, nil
}
// ClusterServiceAccountName returns the name of a service account whose
// credentials are used by the host cluster to access the client cluster.
func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string {
return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName)
}
// IsPrimaryCluster checks if the caller is working with objects for the
// primary cluster by checking if the UIDs match for both objects passed in.
// TODO (font): Need to revisit this when cluster ID is available.
func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
	return MetaAccessor(obj).GetUID() == MetaAccessor(clusterObj).GetUID()
}
// MetaAccessor extracts the ObjectMeta accessor from a runtime object,
// returning nil when the object does not carry object metadata.
func MetaAccessor(obj pkgruntime.Object) metav1.Object {
	if accessor, err := meta.Accessor(obj); err == nil {
		return accessor
	}
	// Accessor should always succeed if obj is not nil. Also,
	// adapters are slated for replacement by unstructured.
	return nil
}

View File

@@ -1,23 +1,17 @@
/*
Copyright 2022 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package utils
import (
"os"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
)
@@ -30,3 +24,70 @@ func IsClusterReady(cluster *clusterv1alpha1.Cluster) bool {
}
return false
}
// IsClusterSchedulable reports whether workloads may be scheduled to the
// cluster: it must not be terminating, must be ready, and must not have an
// explicit ClusterSchedulable=False condition.
func IsClusterSchedulable(cluster *clusterv1alpha1.Cluster) bool {
	if !cluster.DeletionTimestamp.IsZero() || !IsClusterReady(cluster) {
		return false
	}
	for _, cond := range cluster.Status.Conditions {
		if cond.Type == clusterv1alpha1.ClusterSchedulable && cond.Status == corev1.ConditionFalse {
			return false
		}
	}
	return true
}
// IsHostCluster reports whether the cluster carries the host-cluster label.
func IsHostCluster(cluster *clusterv1alpha1.Cluster) bool {
	_, isHost := cluster.Labels[clusterv1alpha1.HostCluster]
	return isHost
}
// BuildKubeconfigFromRestConfig serializes a client-go rest.Config into
// kubeconfig bytes. CA data and bearer tokens referenced by file path are
// inlined so the resulting kubeconfig is self-contained.
func BuildKubeconfigFromRestConfig(config *rest.Config) ([]byte, error) {
	apiConfig := api.NewConfig()

	apiCluster := &api.Cluster{
		Server:                   config.Host,
		CertificateAuthorityData: config.CAData,
	}

	// generated kubeconfig will be used by cluster federation, CAFile is not
	// accepted by kubefed, so we need to read CAFile and inline its contents.
	if len(apiCluster.CertificateAuthorityData) == 0 && len(config.CAFile) != 0 {
		caData, err := os.ReadFile(config.CAFile)
		if err != nil {
			return nil, err
		}
		apiCluster.CertificateAuthorityData = caData
	}

	apiConfig.Clusters["kubernetes"] = apiCluster

	apiConfig.AuthInfos["kubernetes-admin"] = &api.AuthInfo{
		ClientCertificateData: config.CertData,
		ClientKeyData:         config.KeyData,
		Token:                 config.BearerToken,
	}

	// Prefer the on-disk token when BearerTokenFile is set: service account
	// tokens are rotated on disk, so the file may hold a newer token than
	// config.BearerToken.
	if config.BearerTokenFile != "" {
		// Fix: read BearerTokenFile (a path), not BearerToken (the token
		// string itself, which the original mistakenly passed as a path).
		// A configured-but-unreadable token file is now a hard error rather
		// than being silently ignored.
		newToken, err := os.ReadFile(config.BearerTokenFile)
		if err != nil {
			return nil, err
		}
		if len(newToken) > 0 {
			apiConfig.AuthInfos["kubernetes-admin"].Token = string(newToken)
		}
	}

	apiConfig.Contexts["kubernetes-admin@kubernetes"] = &api.Context{
		Cluster:  "kubernetes",
		AuthInfo: "kubernetes-admin",
	}
	apiConfig.CurrentContext = "kubernetes-admin@kubernetes"

	return clientcmd.Write(*apiConfig)
}