feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
KubeSphere CI Bot
2024-09-06 11:05:52 +08:00
committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions

View File

@@ -1,279 +1,66 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package v1alpha1
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/emicklei/go-restful/v3"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/cli-runtime/pkg/printers"
k8sinformers "k8s.io/client-go/informers"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"kubesphere.io/api/cluster/v1alpha1"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"kubesphere.io/kubesphere/pkg/api"
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/config"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
apiv1alpha1 "kubesphere.io/kubesphere/pkg/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/config"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/multicluster"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
"kubesphere.io/kubesphere/pkg/version"
)
const (
defaultAgentImage = "kubesphere/tower:v1.0"
defaultTimeout = 10 * time.Second
KubeSphereApiServer = "ks-apiserver"
)
var errClusterConnectionIsNotProxy = fmt.Errorf("cluster is not using proxy connection")
const defaultTimeout = 10 * time.Second
type handler struct {
ksclient kubesphere.Interface
serviceLister v1.ServiceLister
clusterLister clusterlister.ClusterLister
configMapLister v1.ConfigMapLister
proxyService string
proxyAddress string
agentImage string
yamlPrinter *printers.YAMLPrinter
}
// newHandler wires a cluster API handler from the shared clients and informer
// factories. An empty agentImage falls back to the built-in tower image.
func newHandler(ksclient kubesphere.Interface, k8sInformers k8sinformers.SharedInformerFactory, ksInformers externalversions.SharedInformerFactory, proxyService, proxyAddress, agentImage string) *handler {
	// Fall back to the default tower agent image when none was configured.
	if agentImage == "" {
		agentImage = defaultAgentImage
	}
	h := &handler{
		ksclient:        ksclient,
		serviceLister:   k8sInformers.Core().V1().Services().Lister(),
		clusterLister:   ksInformers.Cluster().V1alpha1().Clusters().Lister(),
		configMapLister: k8sInformers.Core().V1().ConfigMaps().Lister(),
		proxyService:    proxyService,
		proxyAddress:    proxyAddress,
		agentImage:      agentImage,
		yamlPrinter:     &printers.YAMLPrinter{},
	}
	return h
}
// generateAgentDeployment writes a Deployment manifest (YAML) for the tower
// agent of a proxy-connection member cluster to the response.
// proxyAddress takes precedence over proxyService; the proxyService
// LoadBalancer ingress address is used only when proxyAddress is not provided.
func (h *handler) generateAgentDeployment(request *restful.Request, response *restful.Response) {
	clusterName := request.PathParameter("cluster")
	cluster, err := h.clusterLister.Get(clusterName)
	if err != nil {
		// Distinguish a missing cluster (404) from other lister failures (500).
		if errors.IsNotFound(err) {
			api.HandleNotFound(response, request, err)
			return
		}
		api.HandleInternalError(response, request, err)
		return
	}
	// Only proxy-connection clusters need an agent deployment.
	if cluster.Spec.Connection.Type != v1alpha1.ConnectionTypeProxy {
		api.HandleNotFound(response, request, fmt.Errorf("cluster %s is not using proxy connection", cluster.Name))
		return
	}
	// No explicit proxy address configured: resolve it from the proxy
	// service's LoadBalancer ingress.
	if len(h.proxyAddress) == 0 {
		if err := h.populateProxyAddress(); err != nil {
			api.HandleNotFound(response, request, err)
			return
		}
	}
	var buf bytes.Buffer
	if err := h.generateDefaultDeployment(cluster, &buf); err != nil {
		api.HandleInternalError(response, request, err)
		return
	}
	response.Write(buf.Bytes())
}
// populateProxyAddress resolves h.proxyAddress from the LoadBalancer ingress
// of the configured proxy service. It is called when no explicit proxy address
// was provided. Returns an error when the service cannot be found, exposes no
// ports, or has no public ingress address yet.
func (h *handler) populateProxyAddress() error {
	if len(h.proxyService) == 0 {
		return fmt.Errorf("neither proxy address nor proxy service provided")
	}
	// proxyService may be "name" or "name.namespace[.svc...]"; the namespace
	// defaults to kubesphere-system.
	namespace := "kubesphere-system"
	parts := strings.Split(h.proxyService, ".")
	if len(parts) > 1 && len(parts[1]) != 0 {
		namespace = parts[1]
	}
	service, err := h.serviceLister.Services(namespace).Get(parts[0])
	if err != nil {
		return fmt.Errorf("service %s not found in namespace %s", parts[0], namespace)
	}
	if len(service.Spec.Ports) == 0 {
		return fmt.Errorf("there are no ports in proxy service %s spec", h.proxyService)
	}
	port := service.Spec.Ports[0].Port
	// Walk all ingress entries; the last entry wins, and within an entry an IP
	// overrides a hostname.
	var serviceAddress string
	for _, ingress := range service.Status.LoadBalancer.Ingress {
		if len(ingress.Hostname) != 0 {
			serviceAddress = fmt.Sprintf("http://%s:%d", ingress.Hostname, port)
		}
		if len(ingress.IP) != 0 {
			serviceAddress = fmt.Sprintf("http://%s:%d", ingress.IP, port)
		}
	}
	if len(serviceAddress) == 0 {
		// Message fixed: original had doubled spaces (concatenation artifacts)
		// and the typo "mannually".
		return fmt.Errorf("cannot generate agent deployment yaml for member cluster "+
			"because %s service has no public address, please check %s status, or set address "+
			"manually in ClusterConfiguration", h.proxyService, h.proxyService)
	}
	h.proxyAddress = serviceAddress
	return nil
}
// Currently, this method works because of serviceaccount/clusterrole/clusterrolebinding already
// created by kubesphere, we don't need to create them again. And it's a little bit inconvenient
// if we want to change the template.
// TODO(jeff): load template from configmap
//
// generateDefaultDeployment renders a tower agent Deployment for the given
// proxy-connection cluster as YAML into w. The agent connects back to
// h.proxyAddress with the cluster's name and connection token.
func (h *handler) generateDefaultDeployment(cluster *v1alpha1.Cluster, w io.Writer) error {
	// Sanity-check the proxy address. url.Parse is permissive, so this only
	// rejects grossly malformed values.
	_, err := url.Parse(h.proxyAddress)
	if err != nil {
		return fmt.Errorf("invalid proxy address %s, should format like http[s]://1.2.3.4:123", h.proxyAddress)
	}
	// Direct-connection clusters do not need an agent.
	if cluster.Spec.Connection.Type == v1alpha1.ConnectionTypeDirect {
		return errClusterConnectionIsNotProxy
	}
	// Fixed template: one "agent" container in kubesphere-system, labeled as
	// part of tower, running under the pre-existing "kubesphere" service account.
	agent := appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cluster-agent",
			Namespace: "kubesphere-system",
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app":                       "agent",
					"app.kubernetes.io/part-of": "tower",
				},
			},
			Strategy: appsv1.DeploymentStrategy{},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app":                       "agent",
						"app.kubernetes.io/part-of": "tower",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name: "agent",
							// Agent flags: identity, credentials, and the
							// in-cluster service endpoints it proxies to.
							Command: []string{
								"/agent",
								fmt.Sprintf("--name=%s", cluster.Name),
								fmt.Sprintf("--token=%s", cluster.Spec.Connection.Token),
								fmt.Sprintf("--proxy-server=%s", h.proxyAddress),
								"--keepalive=10s",
								"--kubesphere-service=ks-apiserver.kubesphere-system.svc:80",
								"--kubernetes-service=kubernetes.default.svc:443",
								"--v=0",
							},
							Image: h.agentImage,
							Resources: corev1.ResourceRequirements{
								Limits: corev1.ResourceList{
									corev1.ResourceCPU:    resource.MustParse("1"),
									corev1.ResourceMemory: resource.MustParse("200M"),
								},
								Requests: corev1.ResourceList{
									corev1.ResourceCPU:    resource.MustParse("100m"),
									corev1.ResourceMemory: resource.MustParse("100M"),
								},
							},
						},
					},
					ServiceAccountName: "kubesphere",
				},
			},
		},
	}
	return h.yamlPrinter.PrintObj(&agent, w)
}
// updateKubeConfig updates the kubeconfig of the specific cluster, this API is used to update expired kubeconfig.
func (h *handler) updateKubeConfig(request *restful.Request, response *restful.Response) {
var req clusterv1alpha1.UpdateClusterRequest
var req apiv1alpha1.UpdateClusterRequest
if err := request.ReadEntity(&req); err != nil {
api.HandleBadRequest(response, request, err)
return
}
ctx := request.Request.Context()
clusterName := request.PathParameter("cluster")
obj, err := h.clusterLister.Get(clusterName)
if err != nil {
cluster := &clusterv1alpha1.Cluster{}
if err := h.client.Get(ctx, types.NamespacedName{Name: clusterName}, cluster); err != nil {
api.HandleBadRequest(response, request, err)
return
}
cluster := obj.DeepCopy()
if _, ok := cluster.Labels[v1alpha1.HostCluster]; ok {
if _, ok := cluster.Labels[clusterv1alpha1.HostCluster]; ok {
api.HandleBadRequest(response, request, fmt.Errorf("update kubeconfig of the host cluster is not allowed"))
return
}
// For member clusters that use proxy mode, we don't need to update the kubeconfig,
// if the certs expired, just restart the tower component in the host cluster, it will renew the cert.
if cluster.Spec.Connection.Type == v1alpha1.ConnectionTypeProxy {
if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
api.HandleBadRequest(response, request, fmt.Errorf(
"update kubeconfig of member clusters which using proxy mode is not allowed, their certs are managed and will be renewed by tower",
))
@@ -300,19 +87,18 @@ func (h *handler) updateKubeConfig(request *restful.Request, response *restful.R
return
}
_, err = validateKubeSphereAPIServer(config)
if err != nil {
if _, err = validateKubeSphereAPIServer(ctx, clientSet); err != nil {
api.HandleBadRequest(response, request, fmt.Errorf("unable validate kubesphere endpoint, %v", err))
return
}
err = h.validateMemberClusterConfiguration(clientSet)
if err != nil {
if err = h.validateMemberClusterConfiguration(ctx, clientSet); err != nil {
api.HandleBadRequest(response, request, fmt.Errorf("failed to validate member cluster configuration, err: %v", err))
return
}
// Check if the cluster is the same
kubeSystem, err := clientSet.CoreV1().Namespaces().Get(context.TODO(), metav1.NamespaceSystem, metav1.GetOptions{})
kubeSystem, err := clientSet.CoreV1().Namespaces().Get(ctx, metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
api.HandleBadRequest(response, request, err)
return
@@ -325,8 +111,9 @@ func (h *handler) updateKubeConfig(request *restful.Request, response *restful.R
return
}
cluster = cluster.DeepCopy()
cluster.Spec.Connection.KubeConfig = req.KubeConfig
if _, err = h.ksclient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil {
if err = h.client.Update(ctx, cluster); err != nil {
api.HandleBadRequest(response, request, err)
return
}
@@ -335,15 +122,14 @@ func (h *handler) updateKubeConfig(request *restful.Request, response *restful.R
// ValidateCluster validate cluster kubeconfig and kubesphere apiserver address, check their accessibility
func (h *handler) validateCluster(request *restful.Request, response *restful.Response) {
var cluster v1alpha1.Cluster
err := request.ReadEntity(&cluster)
if err != nil {
var cluster clusterv1alpha1.Cluster
if err := request.ReadEntity(&cluster); err != nil {
api.HandleBadRequest(response, request, err)
return
}
ctx := request.Request.Context()
if cluster.Spec.Connection.Type != v1alpha1.ConnectionTypeDirect {
if cluster.Spec.Connection.Type != clusterv1alpha1.ConnectionTypeDirect {
api.HandleBadRequest(response, request, fmt.Errorf("cluster connection type MUST be direct"))
return
}
@@ -365,38 +151,46 @@ func (h *handler) validateCluster(request *restful.Request, response *restful.Re
return
}
if err = h.validateKubeConfig(cluster.Name, clientSet); err != nil {
if err = h.validateKubeConfig(ctx, cluster.Name, clientSet); err != nil {
api.HandleBadRequest(response, request, err)
return
}
if _, err = validateKubeSphereAPIServer(config); err != nil {
api.HandleBadRequest(response, request, fmt.Errorf("unable validate kubesphere endpoint, %v", err))
// Check if the cluster is managed by other host cluster
if err = clusterIsManaged(ctx, clientSet); err != nil {
api.HandleBadRequest(response, request, err)
return
}
if err = h.validateMemberClusterConfiguration(clientSet); err != nil {
api.HandleBadRequest(response, request, fmt.Errorf("failed to validate member cluster configuration, err: %v", err))
}
response.WriteHeader(http.StatusOK)
}
// clusterIsManaged returns an error when the member cluster's kubesphere-system
// namespace is annotated as already being managed by another host cluster.
// A missing namespace is treated as "not managed" (nil).
func clusterIsManaged(ctx context.Context, client kubernetes.Interface) error {
	ns, err := client.CoreV1().Namespaces().Get(ctx, constants.KubeSphereNamespace, metav1.GetOptions{})
	if err != nil {
		// NotFound means there is nothing to be managed by; other errors propagate.
		return runtimeclient.IgnoreNotFound(err)
	}
	if host := ns.Annotations[clusterv1alpha1.AnnotationHostClusterName]; host != "" {
		return fmt.Errorf("current cluster is managed by another host cluster '%s'", host)
	}
	return nil
}
// validateKubeConfig takes base64 encoded kubeconfig and check its validity
func (h *handler) validateKubeConfig(clusterName string, clientSet kubernetes.Interface) error {
kubeSystem, err := clientSet.CoreV1().Namespaces().Get(context.TODO(), metav1.NamespaceSystem, metav1.GetOptions{})
func (h *handler) validateKubeConfig(ctx context.Context, clusterName string, clientSet kubernetes.Interface) error {
kubeSystem, err := clientSet.CoreV1().Namespaces().Get(ctx, metav1.NamespaceSystem, metav1.GetOptions{})
if err != nil {
return err
}
clusters, err := h.clusterLister.List(labels.Everything())
if err != nil {
clusterList := &clusterv1alpha1.ClusterList{}
if err := h.client.List(ctx, clusterList); err != nil {
return err
}
// clusters with the exactly same kube-system namespace UID considered to be one
// MUST not import the same cluster twice
for _, existedCluster := range clusters {
for _, existedCluster := range clusterList.Items {
if existedCluster.Status.UID == kubeSystem.UID {
return fmt.Errorf("cluster %s already exists (%s), MUST not import the same cluster twice", clusterName, existedCluster.Name)
}
@@ -407,77 +201,118 @@ func (h *handler) validateKubeConfig(clusterName string, clientSet kubernetes.In
}
// validateKubeSphereAPIServer uses version api to check the accessibility
func validateKubeSphereAPIServer(config *rest.Config) (*version.Info, error) {
transport, err := rest.TransportFor(config)
func validateKubeSphereAPIServer(ctx context.Context, clusterClient kubernetes.Interface) (*version.Info, error) {
response, err := clusterClient.CoreV1().Services(constants.KubeSphereNamespace).
ProxyGet("http", constants.KubeSphereAPIServerName, "80", "/version", nil).
DoRaw(ctx)
if err != nil {
return nil, err
}
client := http.Client{
Timeout: defaultTimeout,
Transport: transport,
}
response, err := client.Get(fmt.Sprintf("%s/api/v1/namespaces/%s/services/:%s:/proxy/kapis/version", config.Host, constants.KubeSphereNamespace, KubeSphereApiServer))
if err != nil {
return nil, err
}
defer response.Body.Close()
responseBytes, _ := io.ReadAll(response.Body)
responseBody := string(responseBytes)
response.Body = io.NopCloser(bytes.NewBuffer(responseBytes))
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("invalid response: %s , please make sure %s.%s.svc of member cluster is up and running", KubeSphereApiServer, constants.KubeSphereNamespace, responseBody)
return nil, fmt.Errorf("invalid response: %s, please make sure %s.%s.svc of member cluster is up and running", response, constants.KubeSphereAPIServerName, constants.KubeSphereNamespace)
}
ver := version.Info{}
err = json.NewDecoder(response.Body).Decode(&ver)
if err != nil {
return nil, fmt.Errorf("invalid response: %s , please make sure %s.%s.svc of member cluster is up and running", KubeSphereApiServer, constants.KubeSphereNamespace, responseBody)
if err = json.Unmarshal(response, &ver); err != nil {
return nil, fmt.Errorf("invalid response: %s, please make sure %s.%s.svc of member cluster is up and running", response, constants.KubeSphereAPIServerName, constants.KubeSphereNamespace)
}
return &ver, nil
}
// validateMemberClusterConfiguration compares host and member cluster jwt, if they are not same, it changes member
// cluster jwt to host's, then restart member cluster ks-apiserver.
func (h *handler) validateMemberClusterConfiguration(clientSet kubernetes.Interface) error {
hConfig, err := h.getHostClusterConfig()
if err != nil {
return err
}
mConfig, err := h.getMemberClusterConfig(clientSet)
func (h *handler) validateMemberClusterConfiguration(ctx context.Context, clientSet kubernetes.Interface) error {
hConfig, err := h.getHostClusterConfig(ctx)
if err != nil {
return err
}
if mConfig.MultiClusterOptions.ClusterRole != multicluster.ClusterRoleMember {
return fmt.Errorf("the clusterRole of the member cluster must be 'member'")
mConfig, err := h.getMemberClusterConfig(ctx, clientSet)
if err != nil {
return err
}
if hConfig.AuthenticationOptions.JwtSecret != mConfig.AuthenticationOptions.JwtSecret {
if hConfig.AuthenticationOptions.Issuer.JWTSecret != mConfig.AuthenticationOptions.Issuer.JWTSecret {
return fmt.Errorf("hostcluster Jwt is not equal to member cluster jwt, please edit the member cluster cluster config")
}
return nil
}
// getMemberClusterConfig returns KubeSphere running config by the given member cluster kubeconfig
func (h *handler) getMemberClusterConfig(clientSet kubernetes.Interface) (*config.Config, error) {
memberCm, err := clientSet.CoreV1().ConfigMaps(constants.KubeSphereNamespace).Get(context.Background(), constants.KubeSphereConfigName, metav1.GetOptions{})
func (h *handler) getMemberClusterConfig(ctx context.Context, clientSet kubernetes.Interface) (*config.Config, error) {
memberCm, err := clientSet.CoreV1().ConfigMaps(constants.KubeSphereNamespace).Get(ctx, constants.KubeSphereConfigName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return config.GetFromConfigMap(memberCm)
return config.FromConfigMap(memberCm)
}
// getHostClusterConfig returns KubeSphere running config from host cluster ConfigMap
func (h *handler) getHostClusterConfig() (*config.Config, error) {
hostCm, err := h.configMapLister.ConfigMaps(constants.KubeSphereNamespace).Get(constants.KubeSphereConfigName)
if err != nil {
return nil, fmt.Errorf("failed to get host cluster %s/configmap/%s, err: %s", constants.KubeSphereNamespace, constants.KubeSphereConfigName, err)
func (h *handler) getHostClusterConfig(ctx context.Context) (*config.Config, error) {
hostCm := &corev1.ConfigMap{}
key := types.NamespacedName{Namespace: constants.KubeSphereNamespace, Name: constants.KubeSphereConfigName}
if err := h.client.Get(ctx, key, hostCm); err != nil {
return nil, fmt.Errorf("failed to get host cluster %s/configmap/%s, err: %s",
constants.KubeSphereNamespace, constants.KubeSphereConfigName, err)
}
return config.GetFromConfigMap(hostCm)
return config.FromConfigMap(hostCm)
}
// visibilityAuth adds or removes the target cluster (path parameter "cluster")
// from the placement cluster list of each workspace named in the request body.
// Each request entry carries an Op: "add" or "remove"; any other value rejects
// the whole request.
func (h *handler) visibilityAuth(req *restful.Request, resp *restful.Response) {
	clusterName := req.PathParameter("cluster")
	var visibilityRequests []apiv1alpha1.UpdateVisibilityRequest
	if err := req.ReadEntity(&visibilityRequests); err != nil {
		api.HandleBadRequest(resp, req, err)
		return
	}
	// Stage all patches first so nothing is written if any workspace lookup
	// fails or any entry has an unsupported op.
	patchData := make([]struct {
		workspace tenantv1beta1.WorkspaceTemplate
		patch     runtimeclient.Patch
	}, 0, 4)
	for _, visibilityRequest := range visibilityRequests {
		workspaceTemplate := tenantv1beta1.WorkspaceTemplate{}
		if err := h.client.Get(context.Background(), types.NamespacedName{Name: visibilityRequest.Workspace}, &workspaceTemplate); err != nil {
			api.HandleBadRequest(resp, req, err)
			return
		}
		// Collect the current placement cluster names into a set so add/remove
		// are idempotent; empty names are dropped.
		clusterSets := sets.New[string]()
		for _, clusterRef := range workspaceTemplate.Spec.Placement.Clusters {
			if clusterRef.Name != "" {
				clusterSets.Insert(clusterRef.Name)
			}
		}
		switch visibilityRequest.Op {
		case "add":
			clusterSets.Insert(clusterName)
		case "remove":
			if clusterSets.Has(clusterName) {
				clusterSets.Delete(clusterName)
			}
		default:
			api.HandleBadRequest(resp, req, errors.NewBadRequest("not support operation type"))
			return
		}
		// Rebuild the placement list from the set; UnsortedList gives an
		// unspecified order.
		newClusters := make([]tenantv1beta1.GenericClusterReference, 0, clusterSets.Len())
		for _, cluster := range clusterSets.UnsortedList() {
			newClusters = append(newClusters, tenantv1beta1.GenericClusterReference{Name: cluster})
		}
		workspaceTemplateCopy := workspaceTemplate.DeepCopy()
		workspaceTemplateCopy.Spec.Placement.Clusters = newClusters
		// MergeFrom against the unmodified object yields a minimal merge patch
		// touching only the placement list.
		patchData = append(patchData, struct {
			workspace tenantv1beta1.WorkspaceTemplate
			patch     runtimeclient.Patch
		}{workspace: *workspaceTemplateCopy, patch: runtimeclient.MergeFrom(&workspaceTemplate)})
	}
	// Apply all staged patches; the first failure aborts (already-applied
	// patches are not rolled back).
	for _, pd := range patchData {
		if err := h.client.Patch(context.Background(), &pd.workspace, pd.patch); err != nil {
			api.HandleBadRequest(resp, req, err)
			return
		}
	}
	resp.WriteHeader(http.StatusOK)
}

View File

@@ -1,55 +1,40 @@
//go:build exclude
/*
Copyright 2020 KubeSphere Authors
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO test failed
package v1alpha1
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"testing"
"github.com/google/go-cmp/cmp"
runtimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
"kubesphere.io/kubesphere/pkg/scheme"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/client-go/kubernetes"
k8s "k8s.io/client-go/kubernetes"
k8sfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/informers"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
)
const (
proxyAddress = "http://139.198.121.121:8080"
agentImage = "kubesphere/tower:v1.0"
proxyService = "tower.kubesphere-system.svc"
)
var cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "gondor",
@@ -62,31 +47,6 @@ var cluster = &v1alpha1.Cluster{
},
}
var service = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "tower",
Namespace: "kubesphere-system",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Port: 8080,
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
IP: "139.198.121.121",
Hostname: "foo.bar",
},
},
},
},
}
var hostMap = map[string]string{
"kubesphere.yaml": `
monitoring:
@@ -96,8 +56,6 @@ authentication:
oauthOptions:
accessTokenMaxAge: 0s
accessTokenInactivityTimeout: 0s
multicluster:
clusterRole: host
`,
}
@@ -106,12 +64,10 @@ var memberMap = map[string]string{
monitoring:
endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
authentication:
jwtSecret: sQh3JOqNbmci6Gu94TeV10AY7ipltwjp
jwtSecret: sQh3JOqNbmci6Gu94TeV10AY7ipltwj
oauthOptions:
accessTokenMaxAge: 0s
accessTokenInactivityTimeout: 0s
multicluster:
clusterRole: member
`,
}
@@ -178,134 +134,6 @@ var ksApiserverDeploy = `
}
}`
var expected = `apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
name: cluster-agent
namespace: kubesphere-system
spec:
selector:
matchLabels:
app: agent
app.kubernetes.io/part-of: tower
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: agent
app.kubernetes.io/part-of: tower
spec:
containers:
- command:
- /agent
- --name=gondor
- --token=randomtoken
- --proxy-server=http://139.198.121.121:8080
- --keepalive=10s
- --kubesphere-service=ks-apiserver.kubesphere-system.svc:80
- --kubernetes-service=kubernetes.default.svc:443
- --v=0
image: kubesphere/tower:v1.0
name: agent
resources:
limits:
cpu: "1"
memory: 200M
requests:
cpu: 100m
memory: 100M
serviceAccountName: kubesphere
status: {}
`
// TestGeranteAgentDeployment exercises generateDefaultDeployment through the
// full handler construction path (fake clientsets + seeded informer caches),
// covering both a proxy-connection cluster and the direct-connection error case.
// NOTE(review): "Gerante" looks like a typo for "Generate"; kept as-is so
// -run filters keep matching.
func TestGeranteAgentDeployment(t *testing.T) {
	k8sclient := k8sfake.NewSimpleClientset(service)
	ksclient := fake.NewSimpleClientset(cluster)
	informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil)
	// Seed the informer indexers directly; no informer Run loop in tests.
	informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service)
	informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster)
	directConnectionCluster := cluster.DeepCopy()
	directConnectionCluster.Spec.Connection.Type = v1alpha1.ConnectionTypeDirect
	var testCases = []struct {
		description    string
		expectingError bool
		expectedError  error
		cluster        *v1alpha1.Cluster
		expected       string
	}{
		{
			description:    "test normal case",
			expectingError: false,
			expected:       expected,
			cluster:        cluster,
		},
		{
			// expected is left empty here: the error path writes nothing to buf.
			description:    "test direct connection cluster",
			expectingError: true,
			expectedError:  errClusterConnectionIsNotProxy,
			cluster:        directConnectionCluster,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			// Empty proxyAddress forces populateProxyAddress to resolve the
			// address from the fixture service's LoadBalancer ingress.
			h := newHandler(ksclient, informersFactory.KubernetesSharedInformerFactory(),
				informersFactory.KubeSphereSharedInformerFactory(),
				proxyService,
				"",
				agentImage)
			var buf bytes.Buffer
			err := h.populateProxyAddress()
			if err != nil {
				t.Error(err)
			}
			err = h.generateDefaultDeployment(testCase.cluster, &buf)
			if testCase.expectingError {
				if err == nil {
					t.Fatalf("expecting error %v, got nil", testCase.expectedError)
				} else if err != testCase.expectedError {
					t.Fatalf("expecting error %v, got %v", testCase.expectedError, err)
				}
			}
			if diff := cmp.Diff(testCase.expected, buf.String()); len(diff) != 0 {
				t.Errorf("%T, got +, expected -, %s", testCase.expected, diff)
			}
		})
	}
}
// TestInnerGenerateAgentDeployment renders the agent Deployment for the fixture
// cluster using a hand-built handler (no informers, proxyAddress preset) and
// compares the produced YAML against the expected literal.
func TestInnerGenerateAgentDeployment(t *testing.T) {
	h := &handler{
		proxyAddress: proxyAddress,
		agentImage:   agentImage,
		yamlPrinter:  &printers.YAMLPrinter{},
	}
	var buf bytes.Buffer
	err := h.generateDefaultDeployment(cluster, &buf)
	if err != nil {
		t.Error(err)
	}
	if diff := cmp.Diff(buf.String(), expected); len(diff) != 0 {
		t.Error(diff)
	}
}
var base64EncodedKubeConfig = `
apiVersion: v1
clusters:
@@ -329,19 +157,10 @@ users:
`
func TestValidateKubeConfig(t *testing.T) {
k8sclient := k8sfake.NewSimpleClientset(service)
ksclient := fake.NewSimpleClientset(cluster)
informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil)
informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service)
informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster)
h := newHandler(ksclient, informersFactory.KubernetesSharedInformerFactory(),
informersFactory.KubeSphereSharedInformerFactory(),
proxyService,
"",
agentImage)
client := runtimefakeclient.NewClientBuilder().
WithScheme(scheme.Scheme).
Build()
h := newHandler(client)
config, err := k8sutil.LoadKubeConfigFromBytes([]byte(base64EncodedKubeConfig))
if err != nil {
@@ -388,20 +207,11 @@ func TestValidateKubeConfig(t *testing.T) {
}
func TestValidateMemberClusterConfiguration(t *testing.T) {
k8sclient := k8sfake.NewSimpleClientset(service)
ksclient := fake.NewSimpleClientset(cluster)
client := runtimefakeclient.NewClientBuilder().
WithScheme(scheme.Scheme).
Build()
informersFactory := informers.NewInformerFactories(k8sclient, ksclient, nil, nil, nil, nil)
informersFactory.KubernetesSharedInformerFactory().Core().V1().Services().Informer().GetIndexer().Add(service)
informersFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Informer().GetIndexer().Add(cluster)
informersFactory.KubernetesSharedInformerFactory().Core().V1().ConfigMaps().Informer().GetIndexer().Add(hostCm)
h := newHandler(ksclient, informersFactory.KubernetesSharedInformerFactory(),
informersFactory.KubeSphereSharedInformerFactory(),
proxyService,
"",
agentImage)
h := newHandler(client)
config, err := k8sutil.LoadKubeConfigFromBytes([]byte(base64EncodedKubeConfig))
if err != nil {
@@ -441,10 +251,19 @@ func TestValidateMemberClusterConfiguration(t *testing.T) {
t.Fatal(err)
}
addMemberClusterResource(memberCm, t)
if err = h.validateMemberClusterConfiguration(clientSet); err != nil {
addMemberClusterResource(hostCm, t)
err = h.validateMemberClusterConfiguration(clientSet)
if err != nil {
t.Fatal(err)
}
addMemberClusterResource(memberCm, t)
err = h.validateMemberClusterConfiguration(clientSet)
if err == nil {
t.Fatal()
}
t.Log(err)
}
func addMemberClusterResource(targetCm *corev1.ConfigMap, t *testing.T) {

View File

@@ -0,0 +1,196 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package v1alpha1
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/emicklei/go-restful/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/api"
apiv1alpha1 "kubesphere.io/kubesphere/pkg/api/cluster/v1alpha1"
)
// labelExists reports whether a Label with the requested key/value pair is
// already present in the given list.
func labelExists(req apiv1alpha1.CreateLabelRequest, labels *clusterv1alpha1.LabelList) bool {
	for i := range labels.Items {
		spec := labels.Items[i].Spec
		if spec.Key == req.Key && spec.Value == req.Value {
			return true
		}
	}
	return false
}
// createLabels creates a batch of cluster Labels from the request body.
// Each requested key/value must not collide with an existing Label, nor with
// another entry in the same batch; on any conflict or API error the request is
// rejected (labels already created earlier in the batch are not rolled back).
func (h *handler) createLabels(request *restful.Request, response *restful.Response) {
	var labelRequests []apiv1alpha1.CreateLabelRequest
	if err := request.ReadEntity(&labelRequests); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	allLabels := &clusterv1alpha1.LabelList{}
	if err := h.client.List(context.Background(), allLabels); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	results := make([]*clusterv1alpha1.Label, 0, len(labelRequests))
	for _, r := range labelRequests {
		if labelExists(r, allLabels) {
			api.HandleBadRequest(response, request, fmt.Errorf("label %s/%s already exists", r.Key, r.Value))
			return
		}
		obj := &clusterv1alpha1.Label{
			ObjectMeta: metav1.ObjectMeta{
				// Random name: a Label is identified by its key/value spec,
				// not by its object name.
				Name:       rand.String(6),
				Finalizers: []string{clusterv1alpha1.LabelFinalizer},
			},
			Spec: clusterv1alpha1.LabelSpec{
				Key:   strings.TrimSpace(r.Key),
				Value: strings.TrimSpace(r.Value),
			},
		}
		if err := h.client.Create(context.Background(), obj); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		// Bug fix: track the newly created label in the in-memory list so a
		// duplicate key/value later in the SAME batch is rejected too —
		// previously only pre-existing labels were checked.
		allLabels.Items = append(allLabels.Items, *obj)
		results = append(results, obj)
	}
	response.WriteEntity(results)
}
// updateLabel mutates a single Label identified by the "label" path parameter.
// Behavior is selected by the "action" query parameter:
//   - "unbind": remove the requested clusters from the label's cluster list and
//     delete the per-label marker from each cluster's metadata labels;
//   - anything else: update the label's key/value, rejecting duplicates.
func (h *handler) updateLabel(request *restful.Request, response *restful.Response) {
	label := &clusterv1alpha1.Label{}
	if err := h.client.Get(context.Background(), types.NamespacedName{Name: request.PathParameter("label")}, label); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	switch request.QueryParameter("action") {
	case "unbind": // unbind clusters
		var unbindRequest apiv1alpha1.UnbindClustersRequest
		if err := request.ReadEntity(&unbindRequest); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		// First pass: strip the marker (key built from ClusterLabelFormat and
		// the label name) from each named cluster.
		for _, name := range unbindRequest.Clusters {
			cluster := &clusterv1alpha1.Cluster{}
			if err := h.client.Get(context.Background(), types.NamespacedName{Name: name}, cluster); err != nil {
				api.HandleBadRequest(response, request, err)
				return
			}
			// Copy before mutating; the fetched object may be cache-backed.
			cluster = cluster.DeepCopy()
			delete(cluster.Labels, fmt.Sprintf(clusterv1alpha1.ClusterLabelFormat, label.Name))
			if err := h.client.Update(context.Background(), cluster); err != nil {
				api.HandleBadRequest(response, request, err)
				return
			}
		}
		// Second pass: drop the clusters from the label's bound-cluster list.
		// Set semantics; sets.List returns a sorted slice.
		clusters := sets.NewString(label.Spec.Clusters...)
		clusters.Delete(unbindRequest.Clusters...)
		label.Spec.Clusters = clusters.List()
		if err := h.client.Update(context.Background(), label); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		response.WriteEntity(label)
	default: // update label key/value
		var labelRequest apiv1alpha1.CreateLabelRequest
		if err := request.ReadEntity(&labelRequest); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		// Reject the update when another label already carries this key/value.
		allLabels := &clusterv1alpha1.LabelList{}
		if err := h.client.List(context.Background(), allLabels); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		if labelExists(labelRequest, allLabels) {
			api.HandleBadRequest(response, request, fmt.Errorf("label %s/%s already exists", labelRequest.Key, labelRequest.Value))
			return
		}
		label.Spec.Key = strings.TrimSpace(labelRequest.Key)
		label.Spec.Value = strings.TrimSpace(labelRequest.Value)
		if err := h.client.Update(context.Background(), label); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		response.WriteEntity(label)
	}
}
// deleteLabels handles DELETE /labels.
//
// The request body is a JSON array of label names; each named Label object
// is deleted in turn. The first failing delete aborts the operation and is
// reported as a bad request, so earlier deletions are not rolled back.
func (h *handler) deleteLabels(request *restful.Request, response *restful.Response) {
	var labelNames []string
	if err := request.ReadEntity(&labelNames); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	ctx := context.Background()
	for _, labelName := range labelNames {
		toDelete := &clusterv1alpha1.Label{
			ObjectMeta: metav1.ObjectMeta{Name: labelName},
		}
		if err := h.client.Delete(ctx, toDelete); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
	}
	response.WriteHeader(http.StatusOK)
}
// bindingClusters handles POST /labelbindings.
//
// For every label named in the request, the listed clusters are added to the
// label's Spec.Clusters. The cluster list is merged through a string set so
// that re-binding an already-bound cluster does not create duplicate entries
// — this mirrors how the unbind path (updateLabel) maintains the same field.
// The first failing Get/Update aborts the operation; earlier updates are not
// rolled back.
func (h *handler) bindingClusters(request *restful.Request, response *restful.Response) {
	var bindingRequest apiv1alpha1.BindingClustersRequest
	if err := request.ReadEntity(&bindingRequest); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	ctx := context.Background()
	for _, name := range bindingRequest.Labels {
		label := &clusterv1alpha1.Label{}
		if err := h.client.Get(ctx, types.NamespacedName{Name: name}, label); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
		// Merge via a set: plain append would accumulate duplicates when the
		// same cluster is bound to a label more than once.
		clusters := sets.NewString(label.Spec.Clusters...)
		clusters.Insert(bindingRequest.Clusters...)
		label.Spec.Clusters = clusters.List()
		if err := h.client.Update(ctx, label); err != nil {
			api.HandleBadRequest(response, request, err)
			return
		}
	}
	response.WriteHeader(http.StatusOK)
}
// listLabelGroups handles GET /labels.
//
// It lists every Label object and groups them by Spec.Key, returning a map
// of key -> list of {value, label-object-name} pairs.
func (h *handler) listLabelGroups(request *restful.Request, response *restful.Response) {
	labelList := &clusterv1alpha1.LabelList{}
	if err := h.client.List(context.Background(), labelList); err != nil {
		api.HandleBadRequest(response, request, err)
		return
	}
	groups := make(map[string][]apiv1alpha1.LabelValue)
	for i := range labelList.Items {
		item := &labelList.Items[i]
		entry := apiv1alpha1.LabelValue{
			Value: item.Spec.Value,
			ID:    item.Name,
		}
		groups[item.Spec.Key] = append(groups[item.Spec.Key], entry)
	}
	response.WriteEntity(groups)
}

View File

@@ -1,18 +1,7 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/
package v1alpha1
@@ -22,13 +11,15 @@ import (
restfulspec "github.com/emicklei/go-restful-openapi/v2"
"github.com/emicklei/go-restful/v3"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sinformers "k8s.io/client-go/informers"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"kubesphere.io/kubesphere/pkg/api"
apiv1alpha1 "kubesphere.io/kubesphere/pkg/api/cluster/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/rest"
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/constants"
)
const (
@@ -37,40 +28,78 @@ const (
var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
func AddToContainer(container *restful.Container,
ksclient kubesphere.Interface,
k8sInformers k8sinformers.SharedInformerFactory,
ksInformers externalversions.SharedInformerFactory,
proxyService string,
proxyAddress string,
agentImage string) error {
func NewHandler(cacheClient runtimeclient.Client) rest.Handler {
return &handler{
client: cacheClient,
}
}
func NewFakeHandler() rest.Handler {
return &handler{}
}
func (h *handler) AddToContainer(container *restful.Container) error {
webservice := runtime.NewWebService(GroupVersion)
h := newHandler(ksclient, k8sInformers, ksInformers, proxyService, proxyAddress, agentImage)
// returns deployment yaml for cluster agent
webservice.Route(webservice.GET("/clusters/{cluster}/agent/deployment").
Doc("Return deployment yaml for cluster agent.").
Param(webservice.PathParameter("cluster", "Name of the cluster.").Required(true)).
To(h.generateAgentDeployment).
Returns(http.StatusOK, api.StatusOK, nil).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.MultiClusterTag}))
// TODO use validating admission webhook instead
webservice.Route(webservice.POST("/clusters/validation").
Doc("").
Param(webservice.BodyParameter("cluster", "cluster specification")).
To(h.validateCluster).
Returns(http.StatusOK, api.StatusOK, nil).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.MultiClusterTag}))
Deprecate().
Doc("Cluster validation").
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}).
Reads(clusterv1alpha1.Cluster{}).
Returns(http.StatusOK, api.StatusOK, nil))
webservice.Route(webservice.PUT("/clusters/{cluster}/kubeconfig").
Doc("Update cluster kubeconfig.").
Param(webservice.PathParameter("cluster", "Name of the cluster.").Required(true)).
To(h.updateKubeConfig).
Doc("Update kubeconfig").
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}).
Param(webservice.PathParameter("cluster", "The specified cluster.").Required(true)).
Returns(http.StatusOK, api.StatusOK, nil))
webservice.Route(webservice.POST("/labels").
Doc("Create cluster labels.").
Reads([]apiv1alpha1.CreateLabelRequest{}).
To(h.createLabels).
Returns(http.StatusOK, api.StatusOK, api.ListResult{}).
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}))
webservice.Route(webservice.DELETE("/labels").
Doc("Delete cluster labels.").
Reads([]string{}).
To(h.deleteLabels).
Returns(http.StatusOK, api.StatusOK, nil).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.MultiClusterTag}))
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}))
webservice.Route(webservice.PUT("/labels/{label}").
Doc("Update a label.").
Param(webservice.PathParameter("label", "Name of the label.").Required(true)).
Reads(apiv1alpha1.CreateLabelRequest{}).
To(h.updateLabel).
Returns(http.StatusOK, api.StatusOK, clusterv1alpha1.Label{}).
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}))
webservice.Route(webservice.POST("/labelbindings").
Doc("Binding clusters.").
Reads([]apiv1alpha1.BindingClustersRequest{}).
To(h.bindingClusters).
Returns(http.StatusOK, api.StatusOK, nil).
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}))
webservice.Route(webservice.GET("/labels").
Doc("List labels.").
To(h.listLabelGroups).
Returns(http.StatusOK, api.StatusOK, map[string][]apiv1alpha1.LabelValue{}).
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagMultiCluster}))
webservice.Route(webservice.POST("/clusters/{cluster}/grantrequests").
To(h.visibilityAuth).
Doc("Patch workspace template's visibility in different clusters").
Operation("patch-workspace-template-clusters-visibility").
Metadata(restfulspec.KeyOpenAPITags, []string{api.TagUserRelatedResources}).
Param(webservice.PathParameter("cluster", "The specified cluster.").Required(true)).
Reads([]apiv1alpha1.UpdateVisibilityRequest{}).
Returns(http.StatusOK, api.StatusOK, tenantv1beta1.WorkspaceTemplate{}))
container.Add(webservice)
return nil
}