change cluster schema (#2026)
* change cluster schema
@@ -3,23 +3,30 @@ package cluster
import (
    "fmt"
    v1 "k8s.io/api/core/v1"
    apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/intstr"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/retry"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
    clusterclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1"
    clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1"
    clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
    "math/rand"
    "reflect"
    fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
    "time"
)

@@ -30,17 +37,31 @@ const (
    //
    // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
    maxRetries = 15

    kubefedNamespace = "kube-federation-system"

    hostClusterName = "kubesphere"

    // allocate kubernetesAPIServer port in range [portRangeMin, portRangeMax] for agents if port is not specified
    // kubesphereAPIServer port is defaulted to kubernetesAPIServerPort + 10000
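    // e.g. a proxy cluster allocated kubernetesAPIServerPort 6325 is reached
    // on 6325 for the kubernetes apiserver and on 16325 for the kubesphere apiserver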
    portRangeMin = 6000
    portRangeMax = 7000

    // Service port
    kubernetesPort = 6443
    kubespherePort = 80

    defaultAgentNamespace = "kubesphere-system"
)

type ClusterController struct {
    eventBroadcaster record.EventBroadcaster
    eventRecorder    record.EventRecorder

    agentClient   clusterclient.AgentInterface
    clusterClient clusterclient.ClusterInterface
    client        kubernetes.Interface
    hostConfig    *rest.Config

    agentLister    clusterlister.AgentLister
    agentHasSynced cache.InformerSynced
    clusterClient  clusterclient.ClusterInterface

    clusterLister    clusterlister.ClusterLister
    clusterHasSynced cache.InformerSynced

@@ -52,9 +73,8 @@ type ClusterController struct {

func NewClusterController(
    client kubernetes.Interface,
    config *rest.Config,
    clusterInformer clusterinformer.ClusterInformer,
    agentInformer clusterinformer.AgentInformer,
    agentClient clusterclient.AgentInterface,
    clusterClient clusterclient.ClusterInterface,
) *ClusterController {

@@ -62,38 +82,35 @@ func NewClusterController(
    broadcaster.StartLogging(func(format string, args ...interface{}) {
        klog.Info(fmt.Sprintf(format, args...))
    })
    broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
    broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"})

    c := &ClusterController{
        eventBroadcaster: broadcaster,
        eventRecorder:    recorder,
        agentClient:      agentClient,
        client:           client,
        hostConfig:       config,
        clusterClient:    clusterClient,
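        // the default controller rate limiter applies per-item exponential
        // backoff with a 5ms base, yielding the delays listed above maxRetries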
        queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster"),
        workerLoopPeriod: time.Second,
    }

    c.agentLister = agentInformer.Lister()
    c.agentHasSynced = agentInformer.Informer().HasSynced

    c.clusterLister = clusterInformer.Lister()
    c.clusterHasSynced = clusterInformer.Informer().HasSynced

    clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: c.addCluster,
        UpdateFunc: func(oldObj, newObj interface{}) {
            newCluster := newObj.(*clusterv1alpha1.Cluster)
            oldCluster := oldObj.(*clusterv1alpha1.Cluster)
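            // periodic informer resyncs re-deliver objects with an unchanged
            // ResourceVersion; skip them to avoid pointless requeues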
            if newCluster.ResourceVersion == oldCluster.ResourceVersion {
                return
            }
            c.addCluster(newObj)
        },
        DeleteFunc: c.addCluster,
    })

    agentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    nil,
        UpdateFunc: nil,
        DeleteFunc: nil,
    })

    return c
}

@@ -108,7 +125,7 @@ func (c *ClusterController) Run(workers int, stopCh <-chan struct{}) error {
    klog.V(0).Info("starting cluster controller")
    defer klog.Info("shutting down cluster controller")

    if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced, c.agentHasSynced) {
    if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced) {
        return fmt.Errorf("failed to wait for caches to sync")
    }

@@ -156,87 +173,211 @@ func (c *ClusterController) syncCluster(key string) error {
        // cluster not found, possibly been deleted
        // need to do the cleanup
        if errors.IsNotFound(err) {
            _, err = c.agentLister.Get(name)
            if err != nil && errors.IsNotFound(err) {
                return nil
            }

            if err != nil {
                klog.Errorf("Failed to get cluster agent %s, %#v", name, err)
                return err
            }

            // do the real cleanup work
            err = c.agentClient.Delete(name, &metav1.DeleteOptions{})
            return err
            return nil
        }

        klog.Errorf("Failed to get cluster with name %s, %#v", name, err)
        return err
    }

    newAgent := &clusterv1alpha1.Agent{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "app.kubernetes.io/name":     "tower",
                "cluster.kubesphere.io/name": name,
            },
        },
        Spec: clusterv1alpha1.AgentSpec{
            Token:                   "",
            KubeSphereAPIServerPort: 0,
            KubernetesAPIServerPort: 0,
            Proxy:                   "",
            Paused:                  !cluster.Spec.Active,
        },
    }
    // proxy service name if needed
    serviceName := fmt.Sprintf("mc-%s", cluster.Name)

    agent, err := c.agentLister.Get(name)
    if err != nil && errors.IsNotFound(err) {
        agent, err = c.agentClient.Create(newAgent)
        if err != nil {
            klog.Errorf("Failed to create agent %s, %#v", name, err)
            return err
    if cluster.ObjectMeta.DeletionTimestamp.IsZero() {
        // The object is not being deleted, so if it does not have our finalizer,
        // then lets add the finalizer and update the object. This is equivalent
        // to registering our finalizer.
        if !sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) {
            cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1alpha1.Finalizer)
            if cluster, err = c.clusterClient.Update(cluster); err != nil {
                return err
            }
        }
    } else {
        // The object is being deleted
        if sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) {
            // need to unJoin federation first, because there is some cleanup
            // work to do in the member cluster which depends on the agent to
            // proxy traffic
            err = c.unJoinFederation(nil, name)
            if err != nil {
                klog.Errorf("Failed to unjoin federation for cluster %s, error %v", name, err)
                return err
            }

            _, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
            if err != nil {
                if errors.IsNotFound(err) {
                    // nothing to do
                } else {
                    klog.Errorf("Failed to get proxy service %s, error %v", serviceName, err)
                    return err
                }
            } else {
                err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(serviceName, metav1.NewDeleteOptions(0))
                if err != nil {
                    klog.Errorf("Unable to delete service %s, error %v", serviceName, err)
                    return err
                }
            }

            finalizers := sets.NewString(cluster.ObjectMeta.Finalizers...)
            finalizers.Delete(clusterv1alpha1.Finalizer)
            cluster.ObjectMeta.Finalizers = finalizers.List()
            if _, err = c.clusterClient.Update(cluster); err != nil {
                return err
            }
        }
        return nil
    }

    oldCluster := cluster.DeepCopy()

    // prepare for proxy to member cluster
    if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
        if cluster.Spec.Connection.KubeSphereAPIServerPort == 0 ||
            cluster.Spec.Connection.KubernetesAPIServerPort == 0 {
            port, err := c.allocatePort()
            if err != nil {
                klog.Error(err)
                return err
            }

            cluster.Spec.Connection.KubernetesAPIServerPort = port
            cluster.Spec.Connection.KubeSphereAPIServerPort = port + 10000
        }

        // token uninitialized, generate a new token
        if len(cluster.Spec.Connection.Token) == 0 {
            cluster.Spec.Connection.Token = c.generateToken()
        }

        mcService := v1.Service{
            ObjectMeta: metav1.ObjectMeta{
                Name:      serviceName,
                Namespace: cluster.Namespace,
                Labels: map[string]string{
                    "app.kubernetes.io/name": serviceName,
                    "app":                    serviceName,
                },
            },
            Spec: v1.ServiceSpec{
                Selector: map[string]string{
                    "app.kubernetes.io/name": "tower",
                    "app":                    "tower",
                },
                Ports: []v1.ServicePort{
                    {
                        Name:       "kubernetes",
                        Protocol:   v1.ProtocolTCP,
                        Port:       kubernetesPort,
                        TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubernetesAPIServerPort)),
                    },
                    {
                        Name:       "kubesphere",
                        Protocol:   v1.ProtocolTCP,
                        Port:       kubespherePort,
                        TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubeSphereAPIServerPort)),
                    },
                },
            },
        }

        service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
        if err != nil {
            if errors.IsNotFound(err) {
                service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(&mcService)
                if err != nil {
                    return err
                }
            }

            return err
        } else {
            if !reflect.DeepEqual(service.Spec, mcService.Spec) {
                mcService.ObjectMeta = service.ObjectMeta
                mcService.Spec.ClusterIP = service.Spec.ClusterIP

                service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(&mcService)
                if err != nil {
                    return err
                }
            }
        }

        // populate the kubernetes apiEndpoint and kubesphere apiEndpoint
        cluster.Spec.Connection.KubernetesAPIEndpoint = fmt.Sprintf("https://%s:%d", service.Spec.ClusterIP, kubernetesPort)
        cluster.Spec.Connection.KubeSphereAPIEndpoint = fmt.Sprintf("http://%s:%d", service.Spec.ClusterIP, kubespherePort)

        if !reflect.DeepEqual(oldCluster.Spec, cluster.Spec) {
            cluster, err = c.clusterClient.Update(cluster)
            if err != nil {
                klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
                return err
            }
            return nil
        }
    }

    if len(cluster.Spec.Connection.KubeConfig) == 0 {
        return nil
    }

    var clientSet kubernetes.Interface
    var clusterConfig *rest.Config

    // prepare for
    clientConfig, err := clientcmd.NewClientConfigFromBytes(cluster.Spec.Connection.KubeConfig)
    if err != nil {
        klog.Errorf("Failed to get agent %s, %#v", name, err)
        klog.Errorf("Unable to create client config from kubeconfig bytes, %#v", err)
        return err
    }

    if agent.Spec.Paused != newAgent.Spec.Paused {
        agent.Spec.Paused = newAgent.Spec.Paused
        return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
            _, err = c.agentClient.Update(agent)
            return err
        })
    clusterConfig, err = clientConfig.ClientConfig()
    if err != nil {
        klog.Errorf("Failed to get client config, %#v", err)
        return err
    }

    // agent connection is ready, update cluster status
    // set
    if len(agent.Status.KubeConfig) != 0 && c.isAgentReady(agent) {
        clientConfig, err := clientcmd.NewClientConfigFromBytes(agent.Status.KubeConfig)
    clientSet, err = kubernetes.NewForConfig(clusterConfig)
    if err != nil {
        klog.Errorf("Failed to create ClientSet from config, %#v", err)
        return nil
    }

    if !cluster.Spec.JoinFederation { // trying to unJoin federation
        err = c.unJoinFederation(clusterConfig, cluster.Name)
        if err != nil {
            klog.Errorf("Unable to create client config from kubeconfig bytes, %#v", err)
            klog.Errorf("Failed to unJoin federation for cluster %s, error %v", cluster.Name, err)
            c.eventRecorder.Event(cluster, v1.EventTypeWarning, "UnJoinFederation", err.Error())
            return err
        }

        config, err := clientConfig.ClientConfig()
    } else { // join federation
        _, err = c.joinFederation(clusterConfig, cluster.Name, cluster.Labels)
        if err != nil {
            klog.Errorf("Failed to get client config, %#v", err)
            klog.Errorf("Failed to join federation for cluster %s, error %v", cluster.Name, err)
            c.eventRecorder.Event(cluster, v1.EventTypeWarning, "JoinFederation", err.Error())
            return err
        }
        c.eventRecorder.Event(cluster, v1.EventTypeNormal, "JoinFederation", "Cluster has joined federation.")

        clientSet, err := kubernetes.NewForConfig(config)
        if err != nil {
            klog.Errorf("Failed to create ClientSet from config, %#v", err)
            return nil
        federationReadyCondition := clusterv1alpha1.ClusterCondition{
            Type:               clusterv1alpha1.ClusterFederated,
            Status:             v1.ConditionTrue,
            LastUpdateTime:     metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             "",
            Message:            "Cluster has joined federation control plane successfully",
        }

        c.updateClusterCondition(cluster, federationReadyCondition)
    }

    // cluster agent is ready, so we can pull kubernetes cluster info through
    // the agent. Since no agent is necessary for the host cluster, updates for
    // the host cluster are safe.
    if isConditionTrue(cluster, clusterv1alpha1.ClusterAgentAvailable) ||
        cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeDirect {
        version, err := clientSet.Discovery().ServerVersion()
        if err != nil {
            klog.Errorf("Failed to get kubernetes version, %#v", err)
@@ -252,28 +393,25 @@ func (c *ClusterController) syncCluster(key string) error {
        }

        cluster.Status.NodeCount = len(nodes.Items)

        clusterReadyCondition := clusterv1alpha1.ClusterCondition{
            Type:               clusterv1alpha1.ClusterReady,
            Status:             v1.ConditionTrue,
            LastUpdateTime:     metav1.Now(),
            LastTransitionTime: metav1.Now(),
            Reason:             string(clusterv1alpha1.ClusterReady),
            Message:            "Cluster is available now",
        }

        c.updateClusterCondition(cluster, clusterReadyCondition)
    }

    agentReadyCondition := clusterv1alpha1.ClusterCondition{
        Type:               clusterv1alpha1.ClusterAgentAvailable,
        LastUpdateTime:     metav1.NewTime(time.Now()),
        LastTransitionTime: metav1.NewTime(time.Now()),
        Reason:             "",
        Message:            "Cluster agent is available now.",
    }

    if c.isAgentReady(agent) {
        agentReadyCondition.Status = v1.ConditionTrue
    } else {
        agentReadyCondition.Status = v1.ConditionFalse
    }

    c.addClusterCondition(cluster, agentReadyCondition)

    _, err = c.clusterClient.Update(cluster)
    if err != nil {
        klog.Errorf("Failed to update cluster status, %#v", err)
        return err
    if !reflect.DeepEqual(oldCluster, cluster) {
        _, err = c.clusterClient.Update(cluster)
        if err != nil {
            klog.Errorf("Failed to update cluster status, %#v", err)
            return err
        }
    }

    return nil
@@ -298,50 +436,126 @@ func (c *ClusterController) handleErr(err error, key interface{}) {
    }

    if c.queue.NumRequeues(key) < maxRetries {
        klog.V(2).Infof("Error syncing virtualservice %s for service retrying, %#v", key, err)
        klog.V(2).Infof("Error syncing cluster %s, retrying, %v", key, err)
        c.queue.AddRateLimited(key)
        return
    }

    klog.V(4).Infof("Dropping service %s out of the queue.", key)
    klog.V(4).Infof("Dropping cluster %s out of the queue.", key)
    c.queue.Forget(key)
    utilruntime.HandleError(err)
}

func (c *ClusterController) addAgent(obj interface{}) {
    agent := obj.(*clusterv1alpha1.Agent)
    key, err := cache.MetaNamespaceKeyFunc(obj)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("get agent key %s failed", agent.Name))
        return
    }

    c.queue.Add(key)
}

func (c *ClusterController) isAgentReady(agent *clusterv1alpha1.Agent) bool {
    for _, condition := range agent.Status.Conditions {
        if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == v1.ConditionTrue {
func isConditionTrue(cluster *clusterv1alpha1.Cluster, conditionType clusterv1alpha1.ClusterConditionType) bool {
    for _, condition := range cluster.Status.Conditions {
        if condition.Type == conditionType && condition.Status == v1.ConditionTrue {
            return true
        }
    }
    return false
}

// addClusterCondition add condition
func (c *ClusterController) addClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) {
// updateClusterCondition updates the matching condition in cluster conditions
// with the given condition, adding it if it does not exist
func (c *ClusterController) updateClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) {
    if cluster.Status.Conditions == nil {
        cluster.Status.Conditions = make([]clusterv1alpha1.ClusterCondition, 0)
    }

    newConditions := make([]clusterv1alpha1.ClusterCondition, 0)
    needToUpdate := true
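    // conditions of other types are kept as-is; a condition of the same type
    // only needs replacing when its status differs from the given one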
    for _, cond := range cluster.Status.Conditions {
        if cond.Type == condition.Type {
            continue
            if cond.Status == condition.Status {
                needToUpdate = false
                continue
            } else {
                newConditions = append(newConditions, cond)
            }
        }
        newConditions = append(newConditions, cond)
    }

    newConditions = append(newConditions, condition)
    cluster.Status.Conditions = newConditions
    if needToUpdate {
        newConditions = append(newConditions, condition)
        cluster.Status.Conditions = newConditions
    }
}

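// isHostCluster reports whether the cluster carries the host-cluster annotation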
func isHostCluster(cluster *clusterv1alpha1.Cluster) bool {
    for k, v := range cluster.Annotations {
        if k == clusterv1alpha1.IsHostCluster && v == "true" {
            return true
        }
    }

    return false
}

// joinFederation joins a cluster into the federation.
// It returns a nil error if the kubefed cluster already exists.
func (c *ClusterController) joinFederation(clusterConfig *rest.Config, joiningClusterName string, labels map[string]string) (*fedv1b1.KubeFedCluster, error) {

    return joinClusterForNamespace(c.hostConfig,
        clusterConfig,
        kubefedNamespace,
        kubefedNamespace,
        hostClusterName,
        joiningClusterName,
        fmt.Sprintf("%s-secret", joiningClusterName),
        labels,
        apiextv1b1.ClusterScoped,
        false,
        false)
}

// unJoinFederation unjoins a cluster from the federation control plane.
func (c *ClusterController) unJoinFederation(clusterConfig *rest.Config, unjoiningClusterName string) error {
    return unjoinCluster(c.hostConfig,
        clusterConfig,
        kubefedNamespace,
        hostClusterName,
        unjoiningClusterName,
        true,
        false)
}

// allocatePort finds an available port in [portRangeMin, portRangeMax] within
// maximumRetries attempts
// TODO: only works with a handful of clusters
func (c *ClusterController) allocatePort() (uint16, error) {
    rand.Seed(time.Now().UnixNano())

    clusters, err := c.clusterLister.List(labels.Everything())
    if err != nil {
        return 0, err
    }

    const maximumRetries = 10
    for i := 0; i < maximumRetries; i++ {
        collision := false
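        // probe a random candidate port and reject it if any proxy cluster has
        // already recorded it as its kubesphere apiserver port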
        port := uint16(portRangeMin + rand.Intn(portRangeMax-portRangeMin+1))

        for _, item := range clusters {
            if item.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy &&
                item.Spec.Connection.KubernetesAPIServerPort != 0 &&
                item.Spec.Connection.KubeSphereAPIServerPort == port {
                collision = true
                break
            }
        }

        if !collision {
            return port, nil
        }
    }

    return 0, fmt.Errorf("unable to allocate port after %d retries", maximumRetries)
}

// generateToken returns the hex encoding of 32 random bytes to use as a token
func (c *ClusterController) generateToken() string {
    rand.Seed(time.Now().UnixNano())
    b := make([]byte, 32)
    rand.Read(b)
    return fmt.Sprintf("%x", b)
}
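
Since this token gates agent connections to the proxy service, a hardened variant would draw randomness from crypto/rand instead of the time-seeded math/rand used above — a minimal sketch, not part of this commit:

func generateSecureToken() (string, error) {
    // crypto/rand needs no seeding and reports entropy failures,
    // unlike the time-seeded math/rand in generateToken above
    b := make([]byte, 32)
    if _, err := cryptorand.Read(b); err != nil { // cryptorand "crypto/rand"
        return "", err
    }
    return hex.EncodeToString(b), nil // "encoding/hex"
}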

pkg/controller/cluster/helper.go (new file, 1 line)
@@ -0,0 +1 @@
package cluster

pkg/controller/cluster/join.go (new file, 720 lines)
@@ -0,0 +1,720 @@
package cluster

import (
    "context"
    "github.com/pkg/errors"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    kubeclient "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog"
    "reflect"
    "sigs.k8s.io/kubefed/pkg/kubefedctl/util"
    "time"

    fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
    genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
)

var (
    // Policy rules allowing full access to resources in the cluster
    // or namespace.
    namespacedPolicyRules = []rbacv1.PolicyRule{
        {
            Verbs:     []string{rbacv1.VerbAll},
            APIGroups: []string{rbacv1.APIGroupAll},
            Resources: []string{rbacv1.ResourceAll},
        },
    }
    clusterPolicyRules = []rbacv1.PolicyRule{
        namespacedPolicyRules[0],
        {
            NonResourceURLs: []string{rbacv1.NonResourceAll},
            Verbs:           []string{"get"},
        },
    }
)

const (
    tokenKey = "token"
    serviceAccountSecretTimeout = 30 * time.Second
)

// joinClusterForNamespace registers a cluster with a KubeFed control
// plane. The KubeFed namespace in the joining cluster is provided by
// the joiningNamespace parameter.
func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace,
    joiningNamespace, hostClusterName, joiningClusterName, secretName string, labels map[string]string,
    scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {

    hostClientset, err := HostClientset(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
        return nil, err
    }

    clusterClientset, err := ClusterClientset(clusterConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get joining cluster clientset: %v", err)
        return nil, err
    }

    client, err := genericclient.New(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
        return nil, err
    }

    klog.V(2).Infof("Performing preflight checks.")
    err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting)
    if err != nil {
        return nil, err
    }

    klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace)
    _, err = createKubeFedNamespace(clusterClientset, joiningNamespace, joiningClusterName, dryRun)
    if err != nil {
        klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", joiningNamespace, err)
        return nil, err
    }
    klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace)

    saName, err := createAuthorizedServiceAccount(clusterClientset, joiningNamespace, joiningClusterName, hostClusterName, scope, dryRun, errorOnExisting)
    if err != nil {
        return nil, err
    }

    secret, _, err := populateSecretInHostCluster(clusterClientset, hostClientset,
        saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun)
    if err != nil {
        klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err)
        return nil, err
    }

    var disabledTLSValidations []fedv1b1.TLSValidation
    if clusterConfig.TLSClientConfig.Insecure {
        disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll)
    }

    kubefedCluster, err := createKubeFedCluster(client, joiningClusterName, clusterConfig.Host,
        secret.Name, kubefedNamespace, clusterConfig.CAData, disabledTLSValidations, labels, dryRun, errorOnExisting)
    if err != nil {
        klog.V(2).Infof("Failed to create federated cluster resource: %v", err)
        return nil, err
    }

    klog.V(2).Info("Created federated cluster resource")
    return kubefedCluster, nil
}

// performPreflightChecks checks that the host and joining clusters are in
// a consistent state.
func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName,
    kubefedNamespace string, errorOnExisting bool) error {
    // Make sure there is no existing service account in the joining cluster.
    saName := util.ClusterServiceAccountName(name, hostClusterName)
    _, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(saName, metav1.GetOptions{})

    switch {
    case apierrors.IsNotFound(err):
        return nil
    case err != nil:
        return err
    case errorOnExisting:
        return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name)
    default:
        klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name)
        return nil
    }
}

// createKubeFedCluster creates a federated cluster resource that associates
// the cluster and secret.
func createKubeFedCluster(client genericclient.Client, joiningClusterName, apiEndpoint,
    secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation,
    labels map[string]string, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
    fedCluster := &fedv1b1.KubeFedCluster{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: kubefedNamespace,
            Name:      joiningClusterName,
            Labels:    labels,
        },
        Spec: fedv1b1.KubeFedClusterSpec{
            APIEndpoint: apiEndpoint,
            CABundle:    caBundle,
            SecretRef: fedv1b1.LocalSecretReference{
                Name: secretName,
            },
            DisabledTLSValidations: disabledTLSValidations,
        },
    }

    if dryRun {
        return fedCluster, nil
    }

    existingFedCluster := &fedv1b1.KubeFedCluster{}
    err := client.Get(context.TODO(), existingFedCluster, kubefedNamespace, joiningClusterName)
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err)
        return nil, err
    case err == nil && errorOnExisting:
        return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName)
    case err == nil:
        existingFedCluster.Spec = fedCluster.Spec
        existingFedCluster.Labels = labels
        err = client.Update(context.TODO(), existingFedCluster)
        if err != nil {
            klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, err)
            return nil, err
        }
        return existingFedCluster, nil
    default:
        err = client.Create(context.TODO(), fedCluster)
        if err != nil {
            klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err)
            return nil, err
        }
        return fedCluster, nil
    }
}

// createKubeFedNamespace creates the kubefed namespace in the cluster
// associated with clusterClientset, if it doesn't already exist.
func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace,
    joiningClusterName string, dryRun bool) (*corev1.Namespace, error) {
    fedNamespace := &corev1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name: kubefedNamespace,
        },
    }

    if dryRun {
        return fedNamespace, nil
    }

    _, err := clusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
    if err != nil && !apierrors.IsNotFound(err) {
        klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, err)
        return nil, err
    }

    if err == nil {
        klog.V(2).Infof("Already existing %s namespace", kubefedNamespace)
        return fedNamespace, nil
    }

    // Not found, so create.
    _, err = clusterClientset.CoreV1().Namespaces().Create(fedNamespace)
    if err != nil && !apierrors.IsAlreadyExists(err) {
        klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, err)
        return nil, err
    }
    return fedNamespace, nil
}

// createAuthorizedServiceAccount creates a service account and grants
// the privileges required by the KubeFed control plane to manage
// resources in the joining cluster. The name of the created service
// account is returned on success.
func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface,
    namespace, joiningClusterName, hostClusterName string,
    scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (string, error) {

    klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName)

    saName, err := createServiceAccount(joiningClusterClientset, namespace,
        joiningClusterName, hostClusterName, dryRun, errorOnExisting)
    if err != nil {
        klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v",
            saName, joiningClusterName, err)
        return "", err
    }

    klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName)

    if scope == apiextv1b1.NamespaceScoped {
        klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)

        err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
        if err != nil {
            klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err)
            return "", err
        }

        klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s",
            saName, joiningClusterName)

        klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)

        err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName,
            dryRun, errorOnExisting)
        if err != nil {
            klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v",
                saName, joiningClusterName, err)
            return "", err
        }

        klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s",
            saName, joiningClusterName)

    } else {
        klog.V(2).Infof("Creating cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)

        err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
        if err != nil {
            klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v",
                saName, joiningClusterName, err)
            return "", err
        }

        klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s",
            saName, joiningClusterName)
    }

    return saName, nil
}

// createServiceAccount creates a service account in the cluster associated
// with clusterClientset with credentials that will be used by the host cluster
// to access its API server.
func createServiceAccount(clusterClientset kubeclient.Interface, namespace,
    joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) {
    saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName)
    sa := &corev1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{
            Name:      saName,
            Namespace: namespace,
        },
    }

    if dryRun {
        return saName, nil
    }

    // Create a new service account.
    _, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Create(sa)
    switch {
    case apierrors.IsAlreadyExists(err) && errorOnExisting:
        klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
        return "", err
    case err != nil && !apierrors.IsAlreadyExists(err):
        klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
        return "", err
    default:
        return saName, nil
    }
}

func bindingSubjects(saName, namespace string) []rbacv1.Subject {
    return []rbacv1.Subject{
        {
            Kind:      rbacv1.ServiceAccountKind,
            Name:      saName,
            Namespace: namespace,
        },
    }
}

// createClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access all resources in all namespaces in the cluster associated
// with clientset.
func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.RoleName(saName)

    role := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: clusterPolicyRules,
    }
    existingRole, err := clientset.RbacV1().ClusterRoles().Get(roleName, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err := clientset.RbacV1().ClusterRoles().Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default: // role was not found
        _, err := clientset.RbacV1().ClusterRoles().Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    // TODO: This should limit its access to only necessary resources.
    binding := &rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "ClusterRole",
            Name:     roleName,
        },
    }
    existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }
    return nil
}

// createRoleAndBinding creates an RBAC role and binding
// that allows the service account identified by saName to access all
// resources in the specified namespace.
func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.RoleName(saName)

    role := &rbacv1.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: namespacedPolicyRules,
    }
    existingRole, err := clientset.RbacV1().Roles(namespace).Get(roleName, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err)
        return err
    case errorOnExisting && err == nil:
        return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err = clientset.RbacV1().Roles(namespace).Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default:
        _, err := clientset.RbacV1().Roles(namespace).Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    binding := &rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "Role",
            Name:     roleName,
        },
    }

    existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().RoleBindings(namespace).Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err = clientset.RbacV1().RoleBindings(namespace).Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    return nil
}

// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access the health check path of the cluster.
func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.HealthCheckRoleName(saName, namespace)

    role := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: []rbacv1.PolicyRule{
            {
                Verbs:           []string{"Get"},
                NonResourceURLs: []string{"/healthz"},
            },
            // The cluster client expects to be able to list nodes to retrieve zone and region details.
            // TODO(marun) Consider making zone/region retrieval optional
            {
                Verbs:     []string{"list"},
                APIGroups: []string{""},
                Resources: []string{"nodes"},
            },
        },
    }
    existingRole, err := clientset.RbacV1().ClusterRoles().Get(role.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err := clientset.RbacV1().ClusterRoles().Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default: // role was not found
        _, err := clientset.RbacV1().ClusterRoles().Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    binding := &rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "ClusterRole",
            Name:     roleName,
        },
    }
    existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }
    return nil
}

// populateSecretInHostCluster copies the service account secret for saName
// from the cluster referenced by clusterClientset to the client referenced by
// hostClientset, putting it in a secret named secretName in the provided
// namespace.
func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface,
    saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string,
    dryRun bool) (*corev1.Secret, []byte, error) {

    klog.V(2).Infof("Creating cluster credentials secret in host cluster")

    if dryRun {
        dryRunSecret := &corev1.Secret{}
        dryRunSecret.Name = secretName
        return dryRunSecret, nil, nil
    }

    // Get the secret from the joining cluster.
    var secret *corev1.Secret
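    // the token controller populates the service account's token secret
    // asynchronously, so poll until a token-type secret appears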
    err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) {
        sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(saName,
            metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        for _, objReference := range sa.Secrets {
            saSecretName := objReference.Name
            var err error
            secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(saSecretName, metav1.GetOptions{})
            if err != nil {
                return false, nil
            }
            if secret.Type == corev1.SecretTypeServiceAccountToken {
                klog.V(2).Infof("Using secret named: %s", secret.Name)
                return true, nil
            }
        }
        return false, nil
    })

    if err != nil {
        klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err)
        return nil, nil, err
    }

    token, ok := secret.Data[tokenKey]
    if !ok {
        return nil, nil, errors.Errorf("Key %q not found in service account secret", tokenKey)
    }

    // Create a secret in the host cluster containing the token.
    v1Secret := corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: hostNamespace,
        },
        Data: map[string][]byte{
            tokenKey: token,
        },
    }

    if secretName == "" {
        v1Secret.GenerateName = joiningClusterName + "-"
    } else {
        v1Secret.Name = secretName
    }

    var v1SecretResult *corev1.Secret
    _, err = hostClientset.CoreV1().Secrets(hostNamespace).Get(v1Secret.Name, metav1.GetOptions{})
    if err != nil {
        if apierrors.IsNotFound(err) {
            v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Create(&v1Secret)
            if err != nil {
                klog.V(2).Infof("Could not create secret in host cluster: %v", err)
                return nil, nil, err
            }
            return v1SecretResult, nil, nil
        }
        klog.V(2).Infof("Could not get secret %s in host cluster: %v", v1Secret.Name, err)
        return nil, nil, err
    } else {
        v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Update(&v1Secret)
        if err != nil {
            klog.V(2).Infof("Update secret %s in host cluster failed: %v", v1Secret.Name, err)
            return nil, nil, err
        }
    }

    // caBundle is optional so no error is suggested if it is not
    // found in the secret.
    caBundle := secret.Data["ca.crt"]

    klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name)
    return v1SecretResult, caBundle, nil
}

pkg/controller/cluster/unjoin.go (new file, 296 lines)
@@ -0,0 +1,296 @@
package cluster

import (
    "context"
    "github.com/pkg/errors"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kubeclient "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog"
    fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
    genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
    "sigs.k8s.io/kubefed/pkg/kubefedctl/util"
)

// Following code copied from sigs.k8s.io/kubefed to avoid import collision

// unjoinCluster performs all the necessary steps to remove the
// registration of a cluster from a KubeFed control plane provided the
// required set of parameters are passed in.
func unjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, unjoiningClusterName string, forceDeletion, dryRun bool) error {

    hostClientset, err := util.HostClientset(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
        return err
    }

    var clusterClientset *kubeclient.Clientset
    if clusterConfig != nil {
        clusterClientset, err = util.ClusterClientset(clusterConfig)
        if err != nil {
            klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err)
            if !forceDeletion {
                return err
            }
        }
    }

    client, err := genericclient.New(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
        return err
    }

    if clusterClientset != nil {
        err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun)
        if err != nil {
            if !forceDeletion {
                return err
            }
            klog.V(2).Infof("Failed to delete RBAC resources: %v", err)
        }

        err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun)
        if err != nil {
            if !forceDeletion {
                return err
            }
            klog.V(2).Infof("Failed to delete kubefed namespace: %v", err)
        }
    }

    // deletion succeeds once deleteRBACResources and deleteFedNSFromUnjoinCluster have both completed.
    err = deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun)
    if err != nil {
        return err
    }
    return nil
}

// deleteFederatedClusterAndSecret deletes the KubeFedCluster resource that
// represents the unjoining cluster, along with the secret it references.
func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client,
	kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
	if dryRun {
		return nil
	}

	klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q",
		kubefedNamespace, unjoiningClusterName)

	fedCluster := &fedv1b1.KubeFedCluster{}
	err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil
		}
		return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName)
	}

	err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(fedCluster.Spec.SecretRef.Name,
		&metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q",
			kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
	}

	err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name)
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
	}

	return nil
}
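
// Every deletion in this flow follows the same tolerant pattern: NotFound
// counts as success, and any other error aborts the unjoin unless
// forceDeletion is set, in which case it is only logged. A condensed sketch
// of the pattern:
//
//	if apierrors.IsNotFound(err) {
//	    // already gone; nothing to do
//	} else if err != nil {
//	    if !forceDeletion {
//	        return errors.Wrap(err, "...")
//	    }
//	    klog.V(2).Infof("%v", err)
//	}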

// deleteRBACResources deletes the cluster roles, cluster role bindings and
// service account from the unjoining cluster.
func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface,
	namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error {

	saName := ClusterServiceAccountName(unjoiningClusterName, hostClusterName)

	err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun)
	if err != nil {
		return err
	}

	err = deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun)
	if err != nil {
		return err
	}

	return nil
}

// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from
// the unjoining cluster so long as the unjoining cluster is not the
// host cluster.
func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface,
	kubefedNamespace, unjoiningClusterName string, dryRun bool) error {

	if dryRun {
		return nil
	}

	hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace)
	}

	unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
	if err != nil {
		return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
	}

	if IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) {
		klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace)
		return nil
	}

	klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
	err = unjoiningClusterClientset.CoreV1().Namespaces().Delete(kubefedNamespace, &metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
		return nil
	} else if err != nil {
		return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
	} else {
		klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
	}

	return nil
}

// deleteServiceAccount deletes the service account in the cluster associated
// with clusterClientset whose credentials are used by the host cluster to
// access its API server.
func deleteServiceAccount(clusterClientset kubeclient.Interface, saName,
	namespace, unjoiningClusterName string, dryRun bool) error {
	if dryRun {
		return nil
	}

	klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)

	// Delete the service account.
	err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(saName,
		&metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName)
	} else if err != nil {
		return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName)
	} else {
		klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
	}

	return nil
}

// deleteClusterRoleAndBinding deletes the RBAC roles and bindings that
// allow the service account identified by saName to access all resources in
// all namespaces in the cluster associated with clusterClientset.
func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface,
	saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
	if dryRun {
		return nil
	}

	roleName := util.RoleName(saName)
	healthCheckRoleName := util.HealthCheckRoleName(saName, namespace)

	// Attempt to delete all cluster roles and cluster role bindings created by join.
	for _, name := range []string{roleName, healthCheckRoleName} {
		klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.",
			name, saName, unjoiningClusterName)

		err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{})
		if apierrors.IsNotFound(err) {
			klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		} else if err != nil {
			wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q",
				name, saName, unjoiningClusterName)
			if !forceDeletion {
				return wrappedErr
			}
			klog.V(2).Infof("%v", wrappedErr)
		} else {
			klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		}

		klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.",
			name, saName, unjoiningClusterName)
		err = clusterClientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{})
		if apierrors.IsNotFound(err) {
			klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		} else if err != nil {
			wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q",
				name, saName, unjoiningClusterName)
			if !forceDeletion {
				return wrappedErr
			}
			klog.V(2).Infof("%v", wrappedErr)
		} else {
			klog.V(2).Infof("Deleted cluster role %q for service account %q in unjoining cluster %q.",
				name, saName, unjoiningClusterName)
		}
	}

	klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
		namespace, roleName, saName, unjoiningClusterName)
	err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(roleName, &metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q",
			namespace, roleName, saName, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	}

	klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.",
		namespace, roleName, saName, unjoiningClusterName)
	err = clusterClientset.RbacV1().Roles(namespace).Delete(roleName, &metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	} else if err != nil {
		wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q",
			namespace, roleName, saName, unjoiningClusterName)
		if !forceDeletion {
			return wrappedErr
		}
		klog.V(2).Infof("%v", wrappedErr)
	} else {
		klog.V(2).Infof("Deleted role \"%s/%s\" for service account %q in unjoining cluster %q.",
			namespace, roleName, saName, unjoiningClusterName)
	}

	return nil
}

166 pkg/controller/cluster/util.go Normal file
@@ -0,0 +1,166 @@
package cluster

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pkgruntime "k8s.io/apimachinery/pkg/runtime"
	kubeclient "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// Default values for the federated group and version used by
// the enable and disable subcommands of `kubefedctl`.
const (
	DefaultFederatedGroup   = "types.kubefed.io"
	DefaultFederatedVersion = "v1beta1"

	FederatedKindPrefix = "Federated"
)

// FedConfig provides a rest config based on the filesystem kubeconfig (via
// pathOptions) and context in order to talk to the host kubernetes cluster
// and the joining kubernetes cluster.
type FedConfig interface {
	HostConfig(context, kubeconfigPath string) (*rest.Config, error)
	ClusterConfig(context, kubeconfigPath string) (*rest.Config, error)
	GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig
}

// fedConfig implements the FedConfig interface.
type fedConfig struct {
	pathOptions *clientcmd.PathOptions
}

// NewFedConfig creates a fedConfig for `kubefedctl` commands.
func NewFedConfig(pathOptions *clientcmd.PathOptions) FedConfig {
	return &fedConfig{
		pathOptions: pathOptions,
	}
}

// HostConfig provides a rest config to talk to the host kubernetes cluster
// based on the context and kubeconfig passed in.
func (a *fedConfig) HostConfig(context, kubeconfigPath string) (*rest.Config, error) {
	hostConfig := a.GetClientConfig(context, kubeconfigPath)
	hostClientConfig, err := hostConfig.ClientConfig()
	if err != nil {
		return nil, err
	}

	return hostClientConfig, nil
}

// ClusterConfig provides a rest config to talk to the joining kubernetes
// cluster based on the context and kubeconfig passed in.
func (a *fedConfig) ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) {
	clusterConfig := a.GetClientConfig(context, kubeconfigPath)
	clusterClientConfig, err := clusterConfig.ClientConfig()
	if err != nil {
		return nil, err
	}

	return clusterClientConfig, nil
}

// GetClientConfig is a helper method to create a client config from the
// context and kubeconfig passed as arguments.
func (a *fedConfig) GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig {
	loadingRules := *a.pathOptions.LoadingRules
	loadingRules.Precedence = a.pathOptions.GetLoadingPrecedence()
	loadingRules.ExplicitPath = kubeconfigPath
	overrides := &clientcmd.ConfigOverrides{
		CurrentContext: context,
	}

	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, overrides)
}

// HostClientset provides a kubernetes API compliant clientset to
// communicate with the host cluster's kubernetes API server.
func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) {
	return kubeclient.NewForConfig(config)
}

// ClusterClientset provides a kubernetes API compliant clientset to
// communicate with the joining cluster's kubernetes API server.
func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) {
	return kubeclient.NewForConfig(config)
}
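
// A minimal usage sketch of FedConfig and the clientset helpers; the context
// name and kubeconfig path are placeholders, not values taken from this change:
//
//	fc := NewFedConfig(clientcmd.NewDefaultPathOptions())
//	hostCfg, err := fc.HostConfig("host-context", "/root/.kube/config")
//	if err != nil {
//	    return err
//	}
//	hostClientset, err := HostClientset(hostCfg)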

// ClusterServiceAccountName returns the name of a service account whose
// credentials are used by the host cluster to access the client cluster.
func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string {
	return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName)
}

// RoleName returns the name of a Role or ClusterRole and its
// associated RoleBinding or ClusterRoleBinding that are used to allow
// the service account to access necessary resources on the cluster.
func RoleName(serviceAccountName string) string {
	return fmt.Sprintf("kubefed-controller-manager:%s", serviceAccountName)
}

// HealthCheckRoleName returns the name of a ClusterRole and its
// associated ClusterRoleBinding that are used to allow the service
// account to check the health of the cluster and list nodes.
func HealthCheckRoleName(serviceAccountName, namespace string) string {
	return fmt.Sprintf("kubefed-controller-manager:%s:healthcheck-%s", namespace, serviceAccountName)
}
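
// For a member cluster "member-a" joined to host "kubesphere" in the
// "kube-federation-system" namespace, the format strings above yield
// (names derived mechanically, shown here for illustration):
//
//	sa := ClusterServiceAccountName("member-a", "kubesphere")
//	// "member-a-kubesphere"
//	role := RoleName(sa)
//	// "kubefed-controller-manager:member-a-kubesphere"
//	hc := HealthCheckRoleName(sa, "kube-federation-system")
//	// "kubefed-controller-manager:kube-federation-system:healthcheck-member-a-kubesphere"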

// IsFederatedAPIResource checks if a resource with the given Kind and group is a Federated one.
func IsFederatedAPIResource(kind, group string) bool {
	return strings.HasPrefix(kind, FederatedKindPrefix) && group == DefaultFederatedGroup
}
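
// For example, a FederatedDeployment in the default federated group matches,
// while a plain Deployment in the "apps" group does not:
//
//	IsFederatedAPIResource("FederatedDeployment", DefaultFederatedGroup) // true
//	IsFederatedAPIResource("Deployment", "apps")                         // false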

// GetNamespace returns the namespace of the current context.
func GetNamespace(hostClusterContext string, kubeconfig string, config FedConfig) (string, error) {
	clientConfig := config.GetClientConfig(hostClusterContext, kubeconfig)
	currentContext, err := CurrentContext(clientConfig)
	if err != nil {
		return "", err
	}

	ns, _, err := clientConfig.Namespace()
	if err != nil {
		return "", errors.Wrapf(err, "Failed to get the namespace for host cluster context %q and kubeconfig %q",
			currentContext, kubeconfig)
	}

	if len(ns) == 0 {
		ns = "default"
	}
	return ns, nil
}
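
// Illustrative call; the context name and kubeconfig path are placeholders:
//
//	ns, err := GetNamespace("host-context", "/root/.kube/config",
//	    NewFedConfig(clientcmd.NewDefaultPathOptions()))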

// CurrentContext retrieves the current context from the provided config.
func CurrentContext(config clientcmd.ClientConfig) (string, error) {
	rawConfig, err := config.RawConfig()
	if err != nil {
		return "", errors.Wrap(err, "Failed to get current context from config")
	}
	return rawConfig.CurrentContext, nil
}

// IsPrimaryCluster checks if the caller is working with objects for the
// primary cluster by checking if the UIDs match for both ObjectMetas passed
// in.
// TODO (font): Need to revisit this when cluster ID is available.
func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
	meta := MetaAccessor(obj)
	clusterMeta := MetaAccessor(clusterObj)
	return meta.GetUID() == clusterMeta.GetUID()
}

// MetaAccessor returns the metav1.Object interface for the given runtime
// object, or nil if the object carries no object metadata.
func MetaAccessor(obj pkgruntime.Object) metav1.Object {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		// This should always succeed if obj is not nil. Also,
		// adapters are slated for replacement by unstructured.
		return nil
	}
	return accessor
}
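
// IsPrimaryCluster backs the guard in deleteFedNSFromUnjoinCluster: the
// kubefed namespace fetched from the host cluster and from the unjoining
// cluster carry the same UID exactly when the unjoining cluster is the host
// itself, in which case the namespace is kept. A sketch (the clientset names
// are illustrative):
//
//	hostNS, _ := hostClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
//	memberNS, _ := memberClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
//	if IsPrimaryCluster(hostNS, memberNS) {
//	    // unjoining cluster is the host; do not delete the namespace
//	}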