Merge remote-tracking branch 'upstream/dev' into dev

# Conflicts:
#	go.mod
#	pkg/apiserver/apiserver.go
#	pkg/apiserver/config/config.go
@@ -1,123 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-	ResourceKindAgent      = "Agent"
-	ResourcesSingularAgent = "agent"
-	ResourcesPluralAgent   = "agents"
-)
-
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
-// AgentSpec defines the desired state of Agent
-type AgentSpec struct {
-	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
-	// Important: Run "make" to regenerate code after modifying this file
-
-	// Token used by agents to connect to the proxy.
-	// +optional
-	Token string `json:"token,omitempty"`
-
-	// Proxy address
-	// +optional
-	Proxy string `json:"proxy,omitempty"`
-
-	// KubernetesAPIServerPort is the port that listens for forwarded kube-apiserver traffic
-	// +optional
-	KubernetesAPIServerPort uint16 `json:"kubernetesAPIServerPort,omitempty"`
-
-	// KubeSphereAPIServerPort is the port that listens for forwarded KubeSphere API gateway traffic
-	// +optional
-	KubeSphereAPIServerPort uint16 `json:"kubesphereAPIServerPort,omitempty"`
-
-	// Indicates that the agent is paused.
-	// +optional
-	Paused bool `json:"paused,omitempty"`
-}
-
-type AgentConditionType string
-
-const (
-	// Agent is initialized, and waiting to establish a connection to a proxy server
-	AgentInitialized AgentConditionType = "Initialized"
-
-	// Agent has successfully connected to the proxy server
-	AgentConnected AgentConditionType = "Connected"
-)
-
-type AgentCondition struct {
-	// Type of AgentCondition
-	Type AgentConditionType `json:"type,omitempty"`
-	// Status of the condition, one of True, False, Unknown.
-	Status v1.ConditionStatus `json:"status"`
-	// The last time this condition was updated.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
-	// Last time the condition transitioned from one status to another.
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
-	// The reason for the condition's last transition.
-	Reason string `json:"reason,omitempty"`
-	// A human-readable message indicating details about the transition.
-	Message string `json:"message,omitempty"`
-}
-
-// AgentStatus defines the observed state of Agent
-type AgentStatus struct {
-	// Represents the latest available observations of an agent's current state.
-	Conditions []AgentCondition `json:"conditions,omitempty"`
-
-	// Represents the connection quality, in ms
-	Ping uint64 `json:"ping,omitempty"`
-
-	// New kubeconfig issued by the proxy server
-	KubeConfig []byte `json:"kubeconfig,omitempty"`
-}
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:openapi-gen=true
-// +genclient:nonNamespaced
-// +kubebuilder:printcolumn:name="Paused",type="boolean",JSONPath=".spec.paused"
-// +kubebuilder:resource:scope=Cluster
-
-// Agent is the Schema for the agents API
-type Agent struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec   AgentSpec   `json:"spec,omitempty"`
-	Status AgentStatus `json:"status,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// AgentList contains a list of Agent
-type AgentList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []Agent `json:"items"`
-}
-
-func init() {
-	SchemeBuilder.Register(&Agent{}, &AgentList{})
-}
@@ -11,20 +11,71 @@ const (
 	ResourcesPluralCluster = "clusters"
 
+	IsHostCluster = "cluster.kubesphere.io/is-host-cluster"
+	// Description of the region in which the cluster has been placed
+	ClusterRegion = "cluster.kubesphere.io/region"
+	// Name of the cluster group
+	ClusterGroup = "cluster.kubesphere.io/group"
+
 	Finalizer = "finalizer.cluster.kubesphere.io"
 )
 
 type ClusterSpec struct {
 
 	// Join cluster as a kubefed cluster
 	// +optional
-	Federated bool `json:"federated,omitempty"`
+	JoinFederation bool `json:"joinFederation,omitempty"`
 
 	// Desired state of the cluster
-	Active bool `json:"active,omitempty"`
+	Enable bool `json:"enable,omitempty"`
 
 	// Provider of the cluster, this field is just for description
 	// +optional
 	Provider string `json:"provider,omitempty"`
+
+	// Connection holds the info needed to connect to the member cluster
+	Connection Connection `json:"connection,omitempty"`
 }
+
+type ConnectionType string
+
+const (
+	ConnectionTypeDirect ConnectionType = "direct"
+	ConnectionTypeProxy  ConnectionType = "proxy"
+)
+
+type Connection struct {
+
+	// type defines how the host cluster connects to the member cluster:
+	// ConnectionTypeDirect means a direct connection, which requires a
+	// kubeconfig and a KubeSphere apiserver endpoint to be provided;
+	// ConnectionTypeProxy means connecting through the KubeSphere proxy,
+	// so no kubeconfig or KubeSphere apiserver endpoint is required.
+	Type ConnectionType `json:"type,omitempty"`
+
+	// KubeSphere API Server endpoint. Example: http://10.10.0.11:8080
+	// Should be provided explicitly if the connection type is direct.
+	// Will be populated by ks-apiserver if the connection type is proxy.
+	KubeSphereAPIEndpoint string `json:"kubesphereAPIEndpoint,omitempty"`
+
+	// Kubernetes API Server endpoint. Example: https://10.10.0.1:6443
+	// Should be provided explicitly if the connection type is direct.
+	// Will be populated by ks-apiserver if the connection type is proxy.
+	KubernetesAPIEndpoint string `json:"kubernetesAPIEndpoint,omitempty"`
+
+	// KubeConfig content used to connect to the cluster api server.
+	// Should be provided explicitly if the connection type is direct.
+	// Will be populated by ks-proxy if the connection type is proxy.
+	KubeConfig []byte `json:"kubeconfig,omitempty"`
+
+	// Token used by agents of the member cluster to connect to the host cluster proxy.
+	// This field is populated by the apiserver only if the connection type is proxy.
+	Token string `json:"token,omitempty"`
+
+	// KubernetesAPIServerPort is the port that listens for forwarded kube-apiserver traffic.
+	// Only applicable when the connection type is proxy.
+	KubernetesAPIServerPort uint16 `json:"kubernetesAPIServerPort,omitempty"`
+
+	// KubeSphereAPIServerPort is the port that listens for forwarded KubeSphere API gateway traffic.
+	// Only applicable when the connection type is proxy.
+	KubeSphereAPIServerPort uint16 `json:"kubesphereAPIServerPort,omitempty"`
+}
 
 type ClusterConditionType string
@@ -38,6 +89,9 @@ const (
 
+	// Cluster has become one of the federated clusters
+	ClusterFederated ClusterConditionType = "Federated"
+
 	// Cluster is fully available for requests
 	ClusterReady ClusterConditionType = "Ready"
 )
 
 type ClusterCondition struct {
@@ -60,22 +114,29 @@ type ClusterStatus struct {
 	// Represents the latest available observations of a cluster's current state.
 	Conditions []ClusterCondition `json:"conditions,omitempty"`
 
-	// GitVersion of the kubernetes cluster, this field is set by cluster controller
 	// +optional
+	// GitVersion of the kubernetes cluster, this field is populated by cluster controller
 	KubernetesVersion string `json:"kubernetesVersion,omitempty"`
 
 	// Count of the kubernetes cluster nodes
 	// +optional
+	// This field may not reflect the instant status of the cluster.
 	NodeCount int `json:"nodeCount,omitempty"`
 
+	// Zones are the names of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'.
+	// +optional
+	Zones []string `json:"zones,omitempty"`
+
+	// Region is the name of the region in which all of the nodes in the cluster exist, e.g. 'us-east1'.
+	// +optional
+	Region *string `json:"region,omitempty"`
 }
 
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:openapi-gen=true
 // +genclient:nonNamespaced
-// +kubebuilder:printcolumn:name="Federated",type="boolean",JSONPath=".spec.federated"
+// +kubebuilder:printcolumn:name="Federated",type="boolean",JSONPath=".spec.joinFederation"
 // +kubebuilder:printcolumn:name="Provider",type="string",JSONPath=".spec.provider"
-// +kubebuilder:printcolumn:name="Active",type="boolean",JSONPath=".spec.active"
+// +kubebuilder:printcolumn:name="Active",type="boolean",JSONPath=".spec.enable"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.kubernetesVersion"
 // +kubebuilder:resource:scope=Cluster
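For orientation (not part of the diff): the Agent API deleted above is superseded by ClusterSpec.Connection, so a member cluster is now described by a single Cluster object. Below is a minimal sketch of a directly connected member cluster, assuming the Cluster wrapper type mirrors the Agent schema shown earlier (TypeMeta/ObjectMeta/Spec/Status); the name, provider, and endpoint values are made up:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
)

func main() {
	// Direct connection: kubeconfig and both endpoints are supplied up front.
	// Token and the two port fields are left empty because they only apply
	// to ConnectionTypeProxy, per the field comments in the diff above.
	cluster := &clusterv1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "member-a"},
		Spec: clusterv1alpha1.ClusterSpec{
			JoinFederation: true,
			Enable:         true,
			Provider:       "bare-metal",
			Connection: clusterv1alpha1.Connection{
				Type:                  clusterv1alpha1.ConnectionTypeDirect,
				KubernetesAPIEndpoint: "https://10.10.0.1:6443",
				KubeSphereAPIEndpoint: "http://10.10.0.11:8080",
				KubeConfig:            []byte("<kubeconfig content>"),
			},
		},
	}
	fmt.Println(cluster.Name)
}

With ConnectionTypeProxy instead, the endpoint and kubeconfig fields would be left empty and filled in by ks-apiserver/ks-proxy.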
pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go (generated, 163 lines changed)
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
 
@@ -24,137 +24,13 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Agent) DeepCopyInto(out *Agent) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Agent.
-func (in *Agent) DeepCopy() *Agent {
-	if in == nil {
-		return nil
-	}
-	out := new(Agent)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Agent) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AgentCondition) DeepCopyInto(out *AgentCondition) {
-	*out = *in
-	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentCondition.
-func (in *AgentCondition) DeepCopy() *AgentCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(AgentCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AgentList) DeepCopyInto(out *AgentList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Agent, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentList.
-func (in *AgentList) DeepCopy() *AgentList {
-	if in == nil {
-		return nil
-	}
-	out := new(AgentList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AgentList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AgentSpec) DeepCopyInto(out *AgentSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentSpec.
-func (in *AgentSpec) DeepCopy() *AgentSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(AgentSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AgentStatus) DeepCopyInto(out *AgentStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]AgentCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.KubeConfig != nil {
-		in, out := &in.KubeConfig, &out.KubeConfig
-		*out = make([]byte, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentStatus.
-func (in *AgentStatus) DeepCopy() *AgentStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(AgentStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Cluster) DeepCopyInto(out *Cluster) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
+	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
@@ -180,7 +56,6 @@ func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) {
 	*out = *in
 	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
 	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition.
@@ -205,7 +80,6 @@ func (in *ClusterList) DeepCopyInto(out *ClusterList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
@@ -229,7 +103,7 @@ func (in *ClusterList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
 	*out = *in
-	return
+	in.Connection.DeepCopyInto(&out.Connection)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -252,7 +126,16 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Region != nil {
+		in, out := &in.Region, &out.Region
+		*out = new(string)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -264,3 +147,23 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.KubeConfig != nil {
+		in, out := &in.KubeConfig, &out.KubeConfig
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
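The regenerated code above gives ClusterSpec a real DeepCopyInto (delegating to Connection) because Connection carries a []byte kubeconfig that a shallow struct copy would share between objects. A quick standalone check of that behavior (hypothetical snippet, not part of the commit):

package main

import (
	"fmt"

	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
)

func main() {
	orig := &clusterv1alpha1.Connection{KubeConfig: []byte("apiVersion: v1")}
	cp := orig.DeepCopy()
	cp.KubeConfig[0] = 'X'               // mutate the copy only
	fmt.Println(string(orig.KubeConfig)) // still "apiVersion: v1": the slices are independent
}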
pkg/apis/devops/v1alpha1/zz_generated.deepcopy.go (generated, 37 lines changed)
@@ -16,12 +16,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
 
 import (
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
@@ -33,7 +33,6 @@ func (in *AuthConfig) DeepCopyInto(out *AuthConfig) {
 		*out = new(v1.LocalObjectReference)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthConfig.
@@ -49,7 +48,6 @@ func (in *AuthConfig) DeepCopy() *AuthConfig {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CGroupLimits) DeepCopyInto(out *CGroupLimits) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CGroupLimits.
@@ -77,7 +75,6 @@ func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig.
@@ -103,7 +100,6 @@ func (in *ContainerInfo) DeepCopyInto(out *ContainerInfo) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInfo.
@@ -119,7 +115,6 @@ func (in *ContainerInfo) DeepCopy() *ContainerInfo {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
@@ -135,7 +130,6 @@ func (in *DockerConfig) DeepCopy() *DockerConfig {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DockerConfigEntry) DeepCopyInto(out *DockerConfigEntry) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigEntry.
@@ -158,7 +152,6 @@ func (in *DockerConfigJson) DeepCopyInto(out *DockerConfigJson) {
 			(*out)[key] = val
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigJson.
@@ -179,7 +172,6 @@ func (in DockerConfigMap) DeepCopyInto(out *DockerConfigMap) {
 		for key, val := range *in {
 			(*out)[key] = val
 		}
-		return
 	}
 }
 
@@ -196,7 +188,6 @@ func (in DockerConfigMap) DeepCopy() DockerConfigMap {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec.
@@ -217,7 +208,6 @@ func (in *Parameter) DeepCopyInto(out *Parameter) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
@@ -233,7 +223,6 @@ func (in *Parameter) DeepCopy() *Parameter {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
@@ -259,7 +248,6 @@ func (in *S2iAutoScale) DeepCopyInto(out *S2iAutoScale) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iAutoScale.
@@ -279,7 +267,6 @@ func (in *S2iBinary) DeepCopyInto(out *S2iBinary) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	out.Status = in.Status
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinary.
@@ -312,7 +299,6 @@ func (in *S2iBinaryList) DeepCopyInto(out *S2iBinaryList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinaryList.
@@ -340,7 +326,6 @@ func (in *S2iBinarySpec) DeepCopyInto(out *S2iBinarySpec) {
 		in, out := &in.UploadTimeStamp, &out.UploadTimeStamp
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinarySpec.
@@ -356,7 +341,6 @@ func (in *S2iBinarySpec) DeepCopy() *S2iBinarySpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *S2iBinaryStatus) DeepCopyInto(out *S2iBinaryStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinaryStatus.
@@ -377,7 +361,6 @@ func (in *S2iBuildResult) DeepCopyInto(out *S2iBuildResult) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuildResult.
@@ -393,7 +376,6 @@ func (in *S2iBuildResult) DeepCopy() *S2iBuildResult {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *S2iBuildSource) DeepCopyInto(out *S2iBuildSource) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuildSource.
@@ -413,7 +395,6 @@ func (in *S2iBuilder) DeepCopyInto(out *S2iBuilder) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilder.
@@ -446,7 +427,6 @@ func (in *S2iBuilderList) DeepCopyInto(out *S2iBuilderList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderList.
@@ -480,7 +460,6 @@ func (in *S2iBuilderSpec) DeepCopyInto(out *S2iBuilderSpec) {
 		*out = new(UserDefineTemplate)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderSpec.
@@ -505,7 +484,6 @@ func (in *S2iBuilderStatus) DeepCopyInto(out *S2iBuilderStatus) {
 		in, out := &in.LastRunStartTime, &out.LastRunStartTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderStatus.
@@ -525,7 +503,6 @@ func (in *S2iBuilderTemplate) DeepCopyInto(out *S2iBuilderTemplate) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	out.Status = in.Status
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplate.
@@ -558,7 +535,6 @@ func (in *S2iBuilderTemplateList) DeepCopyInto(out *S2iBuilderTemplateList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateList.
@@ -596,7 +572,6 @@ func (in *S2iBuilderTemplateSpec) DeepCopyInto(out *S2iBuilderTemplateSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateSpec.
@@ -612,7 +587,6 @@ func (in *S2iBuilderTemplateSpec) DeepCopy() *S2iBuilderTemplateSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *S2iBuilderTemplateStatus) DeepCopyInto(out *S2iBuilderTemplateStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateStatus.
@@ -715,7 +689,6 @@ func (in *S2iConfig) DeepCopyInto(out *S2iConfig) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iConfig.
@@ -735,7 +708,6 @@ func (in *S2iRun) DeepCopyInto(out *S2iRun) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	out.Spec = in.Spec
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRun.
@@ -768,7 +740,6 @@ func (in *S2iRunList) DeepCopyInto(out *S2iRunList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunList.
@@ -792,7 +763,6 @@ func (in *S2iRunList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *S2iRunSpec) DeepCopyInto(out *S2iRunSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunSpec.
@@ -826,7 +796,6 @@ func (in *S2iRunStatus) DeepCopyInto(out *S2iRunStatus) {
 		*out = new(S2iBuildSource)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunStatus.
@@ -849,7 +818,6 @@ func (in *UserDefineTemplate) DeepCopyInto(out *UserDefineTemplate) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefineTemplate.
@@ -865,7 +833,6 @@ func (in *UserDefineTemplate) DeepCopy() *UserDefineTemplate {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec.
pkg/apis/network/v1alpha1/zz_generated.deepcopy.go (generated, 22 lines changed)
@@ -16,16 +16,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
 
 import (
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
-	numorstring "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring"
+	"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -56,7 +56,6 @@ func (in *EntityRule) DeepCopyInto(out *EntityRule) {
 		*out = new(ServiceAccountMatch)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntityRule.
@@ -82,7 +81,6 @@ func (in *HTTPMatch) DeepCopyInto(out *HTTPMatch) {
 		*out = make([]HTTPPath, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatch.
@@ -98,7 +96,6 @@ func (in *HTTPMatch) DeepCopy() *HTTPMatch {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HTTPPath) DeepCopyInto(out *HTTPPath) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPath.
@@ -124,7 +121,6 @@ func (in *ICMPFields) DeepCopyInto(out *ICMPFields) {
 		*out = new(int)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPFields.
@@ -143,7 +139,6 @@ func (in *NamespaceNetworkPolicy) DeepCopyInto(out *NamespaceNetworkPolicy) {
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicy.
@@ -176,7 +171,6 @@ func (in *NamespaceNetworkPolicyList) DeepCopyInto(out *NamespaceNetworkPolicyLi
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicyList.
@@ -224,7 +218,6 @@ func (in *NamespaceNetworkPolicySpec) DeepCopyInto(out *NamespaceNetworkPolicySp
 		*out = make([]PolicyType, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicySpec.
@@ -272,7 +265,6 @@ func (in *Rule) DeepCopyInto(out *Rule) {
 		*out = new(HTTPMatch)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
@@ -293,7 +285,6 @@ func (in *ServiceAccountMatch) DeepCopyInto(out *ServiceAccountMatch) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountMatch.
@@ -313,7 +304,6 @@ func (in *WorkspaceNetworkPolicy) DeepCopyInto(out *WorkspaceNetworkPolicy) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	out.Status = in.Status
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicy.
@@ -351,7 +341,6 @@ func (in *WorkspaceNetworkPolicyEgressRule) DeepCopyInto(out *WorkspaceNetworkPo
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyEgressRule.
@@ -381,7 +370,6 @@ func (in *WorkspaceNetworkPolicyIngressRule) DeepCopyInto(out *WorkspaceNetworkP
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyIngressRule.
@@ -406,7 +394,6 @@ func (in *WorkspaceNetworkPolicyList) DeepCopyInto(out *WorkspaceNetworkPolicyLi
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyList.
@@ -436,7 +423,6 @@ func (in *WorkspaceNetworkPolicyPeer) DeepCopyInto(out *WorkspaceNetworkPolicyPe
 		*out = new(metav1.LabelSelector)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyPeer.
@@ -471,7 +457,6 @@ func (in *WorkspaceNetworkPolicySpec) DeepCopyInto(out *WorkspaceNetworkPolicySp
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicySpec.
@@ -487,7 +472,6 @@ func (in *WorkspaceNetworkPolicySpec) DeepCopy() *WorkspaceNetworkPolicySpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkspaceNetworkPolicyStatus) DeepCopyInto(out *WorkspaceNetworkPolicyStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyStatus.
@@ -16,12 +16,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha2
 
 import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
@@ -30,7 +30,6 @@ func (in *DestinationRuleSpecTemplate) DeepCopyInto(out *DestinationRuleSpecTemp
 	*out = *in
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleSpecTemplate.
@@ -50,7 +49,6 @@ func (in *ServicePolicy) DeepCopyInto(out *ServicePolicy) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicy.
@@ -76,7 +74,6 @@ func (in *ServicePolicyCondition) DeepCopyInto(out *ServicePolicyCondition) {
 	*out = *in
 	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
 	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyCondition.
@@ -101,7 +98,6 @@ func (in *ServicePolicyList) DeepCopyInto(out *ServicePolicyList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyList.
@@ -131,7 +127,6 @@ func (in *ServicePolicySpec) DeepCopyInto(out *ServicePolicySpec) {
 		(*in).DeepCopyInto(*out)
 	}
 	in.Template.DeepCopyInto(&out.Template)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicySpec.
@@ -162,7 +157,6 @@ func (in *ServicePolicyStatus) DeepCopyInto(out *ServicePolicyStatus) {
 		in, out := &in.CompletionTime, &out.CompletionTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyStatus.
@@ -182,7 +176,6 @@ func (in *Strategy) DeepCopyInto(out *Strategy) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Strategy.
@@ -208,7 +201,6 @@ func (in *StrategyCondition) DeepCopyInto(out *StrategyCondition) {
 	*out = *in
 	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
 	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyCondition.
@@ -233,7 +225,6 @@ func (in *StrategyList) DeepCopyInto(out *StrategyList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
@@ -263,7 +254,6 @@ func (in *StrategySpec) DeepCopyInto(out *StrategySpec) {
 		(*in).DeepCopyInto(*out)
 	}
 	in.Template.DeepCopyInto(&out.Template)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategySpec.
@@ -294,7 +284,6 @@ func (in *StrategyStatus) DeepCopyInto(out *StrategyStatus) {
 		in, out := &in.CompletionTime, &out.CompletionTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyStatus.
@@ -312,7 +301,6 @@ func (in *VirtualServiceTemplateSpec) DeepCopyInto(out *VirtualServiceTemplateSp
 	*out = *in
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceTemplateSpec.

@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
 
@@ -31,7 +31,6 @@ func (in *Workspace) DeepCopyInto(out *Workspace) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	out.Spec = in.Spec
 	out.Status = in.Status
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace.
@@ -64,7 +63,6 @@ func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList.
@@ -88,7 +86,6 @@ func (in *WorkspaceList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec.
@@ -104,7 +101,6 @@ func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus.
@@ -11,18 +11,13 @@ import (
 	unionauth "k8s.io/apiserver/pkg/authentication/request/union"
 	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
 	"k8s.io/klog"
-	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
-	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
-	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/authenticators/basic"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/authenticators/jwttoken"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/request/anonymous"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/request/basictoken"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/request/bearertoken"
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/token"
 	"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
 	"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizerfactory"
 	authorizationoptions "kubesphere.io/kubesphere/pkg/apiserver/authorization/options"
 	"kubesphere.io/kubesphere/pkg/apiserver/authorization/path"
 	unionauthorizer "kubesphere.io/kubesphere/pkg/apiserver/authorization/union"
 	apiserverconfig "kubesphere.io/kubesphere/pkg/apiserver/config"
@@ -32,7 +27,7 @@ import (
 	"kubesphere.io/kubesphere/pkg/informers"
 	configv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/config/v1alpha2"
 	devopsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2"
-	iamapi "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2"
+	iamv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2"
 	loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2"
 	monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3"
 	networkv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/network/v1alpha2"
@@ -201,9 +196,10 @@ func (s *APIServer) buildHandlerChain() {
 	handler := s.Server.Handler
 	handler = filters.WithKubeAPIServer(handler, s.KubernetesClient.Config(), &errorResponder{})
 
-	clusterDispatcher := dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().
-		V1alpha1().Agents().Lister(), s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Lister())
-	handler = filters.WithMultipleClusterDispatcher(handler, clusterDispatcher)
+	if s.Config.MultiClusterOptions.Enable {
+		clusterDispatcher := dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Lister())
+		handler = filters.WithMultipleClusterDispatcher(handler, clusterDispatcher)
+	}
 
 	var authorizers authorizer.Authorizer
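Note on the hunk above: the handler chain is built inside-out, so the dispatcher, now added only when MultiClusterOptions.Enable is set, wraps (and therefore runs before) the kube-apiserver proxy filter. A minimal sketch of that wrapping pattern in plain net/http; the filter names are illustrative, not from the codebase:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// with wraps next so that its own logic runs first, mirroring filters.With*.
func with(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
	h = with("kube-apiserver-proxy", h)  // added first, runs last
	h = with("multicluster-dispatch", h) // added last, runs first
	req := httptest.NewRequest("GET", "/clusters/member-a/api", nil)
	h.ServeHTTP(httptest.NewRecorder(), req) // prints dispatch first, then proxy
}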
@@ -309,6 +305,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error {
 		{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "globalrolebindings"},
 		{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "workspaceroles"},
 		{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "workspacerolebindings"},
+		{Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "clusters"},
 	}
 
 	devopsGVRs := []schema.GroupVersionResource{
@@ -357,7 +354,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error {
 		if !isResourceExists(gvr) {
 			klog.Warningf("resource %s not exists in the cluster", gvr)
 		} else {
-			_, err := appInformerFactory.ForResource(gvr)
+			_, err = appInformerFactory.ForResource(gvr)
 			if err != nil {
 				return err
 			}
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"github.com/spf13/viper"
 	authoptions "kubesphere.io/kubesphere/pkg/apiserver/authentication/options"
-	authorizationoptions "kubesphere.io/kubesphere/pkg/apiserver/authorization/options"
 	"kubesphere.io/kubesphere/pkg/simple/client/alerting"
 	"kubesphere.io/kubesphere/pkg/simple/client/cache"
 	"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
@@ -59,19 +58,20 @@ const (
 
 // Config defines everything needed for apiserver to deal with external services
 type Config struct {
-	DevopsOptions         *jenkins.Options                   `json:"devops,omitempty" yaml:"devops,omitempty" mapstructure:"devops"`
-	SonarQubeOptions      *sonarqube.Options                 `json:"sonarqube,omitempty" yaml:"sonarQube,omitempty" mapstructure:"sonarqube"`
-	KubernetesOptions     *k8s.KubernetesOptions             `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty" mapstructure:"kubernetes"`
-	ServiceMeshOptions    *servicemesh.Options               `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"`
-	NetworkOptions        *network.Options                   `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"`
-	LdapOptions           *ldap.Options                      `json:"ldap,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"`
-	RedisOptions          *cache.Options                     `json:"redis,omitempty" yaml:"redis,omitempty" mapstructure:"redis"`
-	S3Options             *s3.Options                        `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"`
-	OpenPitrixOptions     *openpitrix.Options                `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"`
-	MonitoringOptions     *prometheus.Options                `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"`
-	LoggingOptions        *elasticsearch.Options             `json:"logging,omitempty" yaml:"logging,omitempty" mapstructure:"logging"`
-	AuthenticationOptions *authoptions.AuthenticationOptions `json:"authentication,omitempty" yaml:"authentication,omitempty" mapstructure:"authentication"`
+	DevopsOptions         *jenkins.Options                            `json:"devops,omitempty" yaml:"devops,omitempty" mapstructure:"devops"`
+	SonarQubeOptions      *sonarqube.Options                          `json:"sonarqube,omitempty" yaml:"sonarQube,omitempty" mapstructure:"sonarqube"`
+	KubernetesOptions     *k8s.KubernetesOptions                      `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty" mapstructure:"kubernetes"`
+	ServiceMeshOptions    *servicemesh.Options                        `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"`
+	NetworkOptions        *network.Options                            `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"`
+	LdapOptions           *ldap.Options                               `json:"ldap,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"`
+	RedisOptions          *cache.Options                              `json:"redis,omitempty" yaml:"redis,omitempty" mapstructure:"redis"`
+	S3Options             *s3.Options                                 `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"`
+	OpenPitrixOptions     *openpitrix.Options                         `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"`
+	MonitoringOptions     *prometheus.Options                         `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"`
+	LoggingOptions        *elasticsearch.Options                      `json:"logging,omitempty" yaml:"logging,omitempty" mapstructure:"logging"`
+	AuthenticationOptions *authoptions.AuthenticationOptions          `json:"authentication,omitempty" yaml:"authentication,omitempty" mapstructure:"authentication"`
+	AuthorizationOptions  *authorizationoptions.AuthorizationOptions  `json:"authorization,omitempty" yaml:"authorization,omitempty" mapstructure:"authorization"`
+	MultiClusterOptions   *multicluster.Options                       `json:"multicluster,omitempty" yaml:"multicluster,omitempty" mapstructure:"multicluster"`
 	// Options used for enabling components, not actually used now. Once we switch Alerting/Notification API to kubesphere,
 	// we can add these options to kubesphere command lines
 	AlertingOptions *alerting.Options `json:"alerting,omitempty" yaml:"alerting,omitempty" mapstructure:"alerting"`
@@ -207,4 +207,7 @@ func (conf *Config) stripEmptyOptions() {
 		conf.NotificationOptions = nil
 	}
 
+	if conf.MultiClusterOptions != nil && !conf.MultiClusterOptions.Enable {
+		conf.MultiClusterOptions = nil
+	}
 }
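A sketch of a unit test (not included in the commit) for the new stripEmptyOptions branch: a multicluster section that is present but disabled should be stripped to nil, so callers can treat nil as "feature off":

package config

import (
	"testing"

	"kubesphere.io/kubesphere/pkg/simple/client/multicluster"
)

func TestStripDisabledMultiClusterOptions(t *testing.T) {
	conf := &Config{
		MultiClusterOptions: &multicluster.Options{Enable: false},
	}
	conf.stripEmptyOptions()
	if conf.MultiClusterOptions != nil {
		t.Fatal("disabled multicluster options should be stripped to nil")
	}
}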
@@ -15,6 +15,7 @@ import (
 	"kubesphere.io/kubesphere/pkg/simple/client/ldap"
 	"kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
 	"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus"
+	"kubesphere.io/kubesphere/pkg/simple/client/multicluster"
 	"kubesphere.io/kubesphere/pkg/simple/client/network"
 	"kubesphere.io/kubesphere/pkg/simple/client/notification"
 	"kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
@@ -120,6 +121,9 @@ func newTestConfig() (*Config, error) {
 				AccessTokenInactivityTimeout: 0,
 			},
 		},
+		MultiClusterOptions: &multicluster.Options{
+			Enable: false,
+		},
 	}
 	return conf, nil
 }
@@ -11,6 +11,7 @@ import (
 	"kubesphere.io/kubesphere/pkg/apiserver/request"
 	"kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
 	"net/http"
+	"net/url"
 	"strings"
 )
@@ -20,13 +21,11 @@ type Dispatcher interface {
 }
 
 type clusterDispatch struct {
-	agentLister   v1alpha1.AgentLister
 	clusterLister v1alpha1.ClusterLister
 }
 
-func NewClusterDispatch(agentLister v1alpha1.AgentLister, clusterLister v1alpha1.ClusterLister) Dispatcher {
+func NewClusterDispatch(clusterLister v1alpha1.ClusterLister) Dispatcher {
 	return &clusterDispatch{
-		agentLister:   agentLister,
 		clusterLister: clusterLister,
 	}
 }
@@ -58,23 +57,19 @@ func (c *clusterDispatch) Dispatch(w http.ResponseWriter, req *http.Request, han
 		return
 	}
 
-	agent, err := c.agentLister.Get(info.Cluster)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			http.Error(w, fmt.Sprintf("cluster %s not found", info.Cluster), http.StatusNotFound)
-		} else {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-		}
-		return
-	}
-
-	if !isAgentReady(agent) {
+	if !isClusterReady(cluster) {
 		http.Error(w, fmt.Sprintf("cluster agent is not ready"), http.StatusInternalServerError)
 		return
 	}
 
+	endpoint, err := url.Parse(cluster.Spec.Connection.KubeSphereAPIEndpoint)
+	if err != nil {
+		klog.Error(err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+
 	u := *req.URL
-	u.Host = agent.Spec.Proxy
+	u.Host = endpoint.Host
 	u.Path = strings.Replace(u.Path, fmt.Sprintf("/clusters/%s", info.Cluster), "", 1)
 
 	httpProxy := proxy.NewUpgradeAwareHandler(&u, http.DefaultTransport, true, false, c)
@@ -85,9 +80,9 @@ func (c *clusterDispatch) Error(w http.ResponseWriter, req *http.Request, err er
 	responsewriters.InternalError(w, req, err)
 }
 
-func isAgentReady(agent *clusterv1alpha1.Agent) bool {
-	for _, condition := range agent.Status.Conditions {
-		if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == corev1.ConditionTrue {
+func isClusterReady(cluster *clusterv1alpha1.Cluster) bool {
+	for _, condition := range cluster.Status.Conditions {
+		if condition.Type == clusterv1alpha1.ClusterReady && condition.Status == corev1.ConditionTrue {
 			return true
 		}
 	}
@@ -95,7 +90,6 @@ func isAgentReady(agent *clusterv1alpha1.Agent) bool {
 	return false
 }
 
-//
 func isClusterHostCluster(cluster *clusterv1alpha1.Cluster) bool {
 	for key, value := range cluster.Annotations {
 		if key == clusterv1alpha1.IsHostCluster && value == "true" {
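The net effect of the Dispatch changes above: the proxy target host now comes from the cluster's declared KubeSphere API endpoint instead of from an Agent object, and readiness is judged by the ClusterReady condition. A standalone sketch of the URL rewrite, runnable outside the apiserver; the request path, cluster name, and endpoint are made up for illustration:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	cluster := "member-a"
	// Corresponds to cluster.Spec.Connection.KubeSphereAPIEndpoint.
	endpoint, _ := url.Parse("http://10.10.0.11:8080")

	u, _ := url.Parse("/clusters/member-a/kapis/resources.kubesphere.io/v1alpha3/nodes")
	u.Scheme = endpoint.Scheme
	u.Host = endpoint.Host
	// Strip the /clusters/<name> prefix exactly once, as the dispatcher does.
	u.Path = strings.Replace(u.Path, fmt.Sprintf("/clusters/%s", cluster), "", 1)

	// http://10.10.0.11:8080/kapis/resources.kubesphere.io/v1alpha3/nodes
	fmt.Println(u.String())
}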
@@ -1,180 +0,0 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package v1alpha1

import (
	"time"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	rest "k8s.io/client-go/rest"
	v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
)

// AgentsGetter has a method to return a AgentInterface.
// A group's client should implement this interface.
type AgentsGetter interface {
	Agents() AgentInterface
}

// AgentInterface has methods to work with Agent resources.
type AgentInterface interface {
	Create(*v1alpha1.Agent) (*v1alpha1.Agent, error)
	Update(*v1alpha1.Agent) (*v1alpha1.Agent, error)
	UpdateStatus(*v1alpha1.Agent) (*v1alpha1.Agent, error)
	Delete(name string, options *v1.DeleteOptions) error
	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
	Get(name string, options v1.GetOptions) (*v1alpha1.Agent, error)
	List(opts v1.ListOptions) (*v1alpha1.AgentList, error)
	Watch(opts v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error)
	AgentExpansion
}

// agents implements AgentInterface
type agents struct {
	client rest.Interface
}

// newAgents returns a Agents
func newAgents(c *ClusterV1alpha1Client) *agents {
	return &agents{
		client: c.RESTClient(),
	}
}

// Get takes name of the agent, and returns the corresponding agent object, and an error if there is any.
func (c *agents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) {
	result = &v1alpha1.Agent{}
	err = c.client.Get().
		Resource("agents").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of Agents that match those selectors.
func (c *agents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1alpha1.AgentList{}
	err = c.client.Get().
		Resource("agents").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested agents.
func (c *agents) Watch(opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	opts.Watch = true
	return c.client.Get().
		Resource("agents").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Watch()
}

// Create takes the representation of a agent and creates it. Returns the server's representation of the agent, and an error, if there is any.
func (c *agents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) {
	result = &v1alpha1.Agent{}
	err = c.client.Post().
		Resource("agents").
		Body(agent).
		Do().
		Into(result)
	return
}

// Update takes the representation of a agent and updates it. Returns the server's representation of the agent, and an error, if there is any.
func (c *agents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) {
	result = &v1alpha1.Agent{}
	err = c.client.Put().
		Resource("agents").
		Name(agent.Name).
		Body(agent).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().

func (c *agents) UpdateStatus(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) {
	result = &v1alpha1.Agent{}
	err = c.client.Put().
		Resource("agents").
		Name(agent.Name).
		SubResource("status").
		Body(agent).
		Do().
		Into(result)
	return
}

// Delete takes name of the agent and deletes it. Returns an error if one occurs.
func (c *agents) Delete(name string, options *v1.DeleteOptions) error {
	return c.client.Delete().
		Resource("agents").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *agents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	var timeout time.Duration
	if listOptions.TimeoutSeconds != nil {
		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
	}
	return c.client.Delete().
		Resource("agents").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Timeout(timeout).
		Body(options).
		Do().
		Error()
}

// Patch applies the patch and returns the patched agent.
func (c *agents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) {
	result = &v1alpha1.Agent{}
	err = c.client.Patch(pt).
		Resource("agents").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}
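The generated methods above all reduce to plain REST verbs against the cluster-scoped "agents" resource. For reference, a minimal sketch of how a caller would have used this typed client before its removal; the kubeconfig path and names are illustrative, not taken from the commit:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
)

func main() {
	// Hypothetical kubeconfig location, for illustration only.
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	client, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Agents are cluster-scoped, so no namespace argument is needed.
	agents, err := client.ClusterV1alpha1().Agents().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, a := range agents.Items {
		fmt.Println(a.Name)
	}
}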
@@ -26,7 +26,6 @@ import (

type ClusterV1alpha1Interface interface {
	RESTClient() rest.Interface
	AgentsGetter
	ClustersGetter
}

@@ -35,10 +34,6 @@ type ClusterV1alpha1Client struct {
	restClient rest.Interface
}

func (c *ClusterV1alpha1Client) Agents() AgentInterface {
	return newAgents(c)
}

func (c *ClusterV1alpha1Client) Clusters() ClusterInterface {
	return newClusters(c)
}

@@ -1,131 +0,0 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package fake

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	labels "k8s.io/apimachinery/pkg/labels"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	testing "k8s.io/client-go/testing"
	v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
)

// FakeAgents implements AgentInterface
type FakeAgents struct {
	Fake *FakeClusterV1alpha1
}

var agentsResource = schema.GroupVersionResource{Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "agents"}

var agentsKind = schema.GroupVersionKind{Group: "cluster.kubesphere.io", Version: "v1alpha1", Kind: "Agent"}

// Get takes name of the agent, and returns the corresponding agent object, and an error if there is any.
func (c *FakeAgents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootGetAction(agentsResource, name), &v1alpha1.Agent{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Agent), err
}

// List takes label and field selectors, and returns the list of Agents that match those selectors.
func (c *FakeAgents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootListAction(agentsResource, agentsKind, opts), &v1alpha1.AgentList{})
	if obj == nil {
		return nil, err
	}

	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha1.AgentList{ListMeta: obj.(*v1alpha1.AgentList).ListMeta}
	for _, item := range obj.(*v1alpha1.AgentList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested agents.
func (c *FakeAgents) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewRootWatchAction(agentsResource, opts))
}

// Create takes the representation of a agent and creates it. Returns the server's representation of the agent, and an error, if there is any.
func (c *FakeAgents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootCreateAction(agentsResource, agent), &v1alpha1.Agent{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Agent), err
}

// Update takes the representation of a agent and updates it. Returns the server's representation of the agent, and an error, if there is any.
func (c *FakeAgents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateAction(agentsResource, agent), &v1alpha1.Agent{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Agent), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeAgents) UpdateStatus(agent *v1alpha1.Agent) (*v1alpha1.Agent, error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateSubresourceAction(agentsResource, "status", agent), &v1alpha1.Agent{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Agent), err
}

// Delete takes name of the agent and deletes it. Returns an error if one occurs.
func (c *FakeAgents) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewRootDeleteAction(agentsResource, name), &v1alpha1.Agent{})
	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakeAgents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewRootDeleteCollectionAction(agentsResource, listOptions)

	_, err := c.Fake.Invokes(action, &v1alpha1.AgentList{})
	return err
}

// Patch applies the patch and returns the patched agent.
func (c *FakeAgents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootPatchSubresourceAction(agentsResource, name, pt, data, subresources...), &v1alpha1.Agent{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.Agent), err
}
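A fake like this records actions on an in-memory tracker instead of talking to an API server. A minimal sketch of how it was typically exercised in a unit test, assuming the generated fake package's standard NewSimpleClientset constructor; the test name and fixture are illustrative:

package fake_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	fake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
)

func TestFakeAgents(t *testing.T) {
	// Seed the fake clientset with one Agent object.
	client := fake.NewSimpleClientset(&clusterv1alpha1.Agent{
		ObjectMeta: metav1.ObjectMeta{Name: "member-cluster"},
	})

	got, err := client.ClusterV1alpha1().Agents().Get("member-cluster", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "member-cluster" {
		t.Errorf("unexpected agent name %q", got.Name)
	}
}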
@@ -28,10 +28,6 @@ type FakeClusterV1alpha1 struct {
	*testing.Fake
}

func (c *FakeClusterV1alpha1) Agents() v1alpha1.AgentInterface {
	return &FakeAgents{c}
}

func (c *FakeClusterV1alpha1) Clusters() v1alpha1.ClusterInterface {
	return &FakeClusters{c}
}

@@ -18,6 +18,4 @@ limitations under the License.

package v1alpha1

type AgentExpansion interface{}

type ClusterExpansion interface{}

@@ -1,88 +0,0 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha1

import (
	time "time"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	cache "k8s.io/client-go/tools/cache"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
	v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
)

// AgentInformer provides access to a shared informer and lister for
// Agents.
type AgentInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1alpha1.AgentLister
}

type agentInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// NewAgentInformer constructs a new informer for Agent type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredAgentInformer(client, resyncPeriod, indexers, nil)
}

// NewFilteredAgentInformer constructs a new informer for Agent type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ClusterV1alpha1().Agents().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ClusterV1alpha1().Agents().Watch(options)
			},
		},
		&clusterv1alpha1.Agent{},
		resyncPeriod,
		indexers,
	)
}

func (f *agentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredAgentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *agentInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&clusterv1alpha1.Agent{}, f.defaultInformer)
}

func (f *agentInformer) Lister() v1alpha1.AgentLister {
	return v1alpha1.NewAgentLister(f.Informer().GetIndexer())
}
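The generated comments above capture the intended usage: obtain informers through the shared factory so callers share one cache and one watch connection per type. A minimal sketch, assuming a versioned clientset is already in hand; the resync period is illustrative:

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/klog"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	externalversions "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)

// startAgentInformer wires an Agent informer through the shared factory,
// as the generated comments recommend.
func startAgentInformer(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(client, 10*time.Minute)
	agents := factory.Cluster().V1alpha1().Agents()

	factory.Start(stopCh)            // starts all requested informers
	factory.WaitForCacheSync(stopCh) // block until the initial List completes

	// Read from the local cache instead of hitting the API server.
	list, err := agents.Lister().List(labels.Everything())
	if err != nil {
		klog.Error(err)
		return
	}
	klog.Infof("cached agents: %d", len(list))
}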
@@ -24,8 +24,6 @@ import (

// Interface provides access to all the informers in this group version.
type Interface interface {
	// Agents returns a AgentInformer.
	Agents() AgentInformer
	// Clusters returns a ClusterInformer.
	Clusters() ClusterInformer
}
@@ -41,11 +39,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// Agents returns a AgentInformer.
func (v *version) Agents() AgentInformer {
	return &agentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

// Clusters returns a ClusterInformer.
func (v *version) Clusters() ClusterInformer {
	return &clusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}

@@ -59,8 +59,6 @@ func (f *genericInformer) Lister() cache.GenericLister {
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	switch resource {
	// Group=cluster.kubesphere.io, Version=v1alpha1
	case v1alpha1.SchemeGroupVersion.WithResource("agents"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Agents().Informer()}, nil
	case v1alpha1.SchemeGroupVersion.WithResource("clusters"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Clusters().Informer()}, nil

@@ -1,65 +0,0 @@
/*
Copyright 2019 The KubeSphere authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
)

// AgentLister helps list Agents.
type AgentLister interface {
	// List lists all Agents in the indexer.
	List(selector labels.Selector) (ret []*v1alpha1.Agent, err error)
	// Get retrieves the Agent from the index for a given name.
	Get(name string) (*v1alpha1.Agent, error)
	AgentListerExpansion
}

// agentLister implements the AgentLister interface.
type agentLister struct {
	indexer cache.Indexer
}

// NewAgentLister returns a new AgentLister.
func NewAgentLister(indexer cache.Indexer) AgentLister {
	return &agentLister{indexer: indexer}
}

// List lists all Agents in the indexer.
func (s *agentLister) List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.Agent))
	})
	return ret, err
}

// Get retrieves the Agent from the index for a given name.
func (s *agentLister) Get(name string) (*v1alpha1.Agent, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha1.Resource("agent"), name)
	}
	return obj.(*v1alpha1.Agent), nil
}
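The lister only reads the informer's local index; it never makes an API-server round trip. A minimal sketch of a label-filtered read; the wrapper function is hypothetical, but the label key mirrors the one the cluster controller below attaches to the agents it creates:

package v1alpha1example // hypothetical package, for illustration only

import (
	"k8s.io/apimachinery/pkg/labels"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
)

// agentsForCluster selects cached Agents labelled for a particular cluster.
func agentsForCluster(lister v1alpha1.AgentLister, clusterName string) ([]*clusterv1alpha1.Agent, error) {
	selector := labels.SelectorFromSet(labels.Set{"cluster.kubesphere.io/name": clusterName})
	return lister.List(selector)
}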
@@ -18,10 +18,6 @@ limitations under the License.

package v1alpha1

// AgentListerExpansion allows custom methods to be added to
// AgentLister.
type AgentListerExpansion interface{}

// ClusterListerExpansion allows custom methods to be added to
// ClusterLister.
type ClusterListerExpansion interface{}

@@ -3,23 +3,30 @@ package cluster
import (
	"fmt"
	v1 "k8s.io/api/core/v1"
	apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/retry"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	clusterclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1"
	clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1"
	clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
	"math/rand"
	"reflect"
	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
	"time"
)

@@ -30,17 +37,31 @@ const (
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries = 15

	kubefedNamespace = "kube-federation-system"

	hostClusterName = "kubesphere"

	// allocate kubernetesAPIServer port in range [portRangeMin, portRangeMax] for agents if port is not specified
	// kubesphereAPIServer port is defaulted to kubernetesAPIServerPort + 10000
	portRangeMin = 6000
	portRangeMax = 7000

	// Service port
	kubernetesPort = 6443
	kubespherePort = 80

	defaultAgentNamespace = "kubesphere-system"
)

type ClusterController struct {
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	agentClient   clusterclient.AgentInterface
	clusterClient clusterclient.ClusterInterface
	client     kubernetes.Interface
	hostConfig *rest.Config

	agentLister    clusterlister.AgentLister
	agentHasSynced cache.InformerSynced
	clusterClient clusterclient.ClusterInterface

	clusterLister    clusterlister.ClusterLister
	clusterHasSynced cache.InformerSynced
@@ -52,9 +73,8 @@ type ClusterController struct {

func NewClusterController(
	client kubernetes.Interface,
	config *rest.Config,
	clusterInformer clusterinformer.ClusterInformer,
	agentInformer clusterinformer.AgentInformer,
	agentClient clusterclient.AgentInterface,
	clusterClient clusterclient.ClusterInterface,
) *ClusterController {

@@ -62,38 +82,35 @@ func NewClusterController(
	broadcaster.StartLogging(func(format string, args ...interface{}) {
		klog.Info(fmt.Sprintf(format, args))
	})
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"})

	c := &ClusterController{
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		agentClient:      agentClient,
		client:           client,
		hostConfig:       config,
		clusterClient:    clusterClient,
		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster"),
		workerLoopPeriod: time.Second,
	}

	c.agentLister = agentInformer.Lister()
	c.agentHasSynced = agentInformer.Informer().HasSynced

	c.clusterLister = clusterInformer.Lister()
	c.clusterHasSynced = clusterInformer.Informer().HasSynced

	clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: c.addCluster,
		UpdateFunc: func(oldObj, newObj interface{}) {
			newCluster := newObj.(*clusterv1alpha1.Cluster)
			oldCluster := oldObj.(*clusterv1alpha1.Cluster)
			if newCluster.ResourceVersion == oldCluster.ResourceVersion {
				return
			}
			c.addCluster(newObj)
		},
		DeleteFunc: c.addCluster,
	})

	agentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nil,
		UpdateFunc: nil,
		DeleteFunc: nil,
	})

	return c
}

@@ -108,7 +125,7 @@ func (c *ClusterController) Run(workers int, stopCh <-chan struct{}) error {
	klog.V(0).Info("starting cluster controller")
	defer klog.Info("shutting down cluster controller")

	if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced, c.agentHasSynced) {
	if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced) {
		return fmt.Errorf("failed to wait for caches to sync")
	}

@@ -156,87 +173,211 @@ func (c *ClusterController) syncCluster(key string) error {
	// cluster not found, possibly been deleted
	// need to do the cleanup
	if errors.IsNotFound(err) {
		_, err = c.agentLister.Get(name)
		if err != nil && errors.IsNotFound(err) {
			return nil
		}

		if err != nil {
			klog.Errorf("Failed to get cluster agent %s, %#v", name, err)
			return err
		}

		// do the real cleanup work
		err = c.agentClient.Delete(name, &metav1.DeleteOptions{})
		return err
		return nil
	}

	klog.Errorf("Failed to get cluster with name %s, %#v", name, err)
	return err
}

newAgent := &clusterv1alpha1.Agent{
	ObjectMeta: metav1.ObjectMeta{
		Name: name,
		Labels: map[string]string{
			"app.kubernetes.io/name":     "tower",
			"cluster.kubesphere.io/name": name,
		},
	},
	Spec: clusterv1alpha1.AgentSpec{
		Token:                   "",
		KubeSphereAPIServerPort: 0,
		KubernetesAPIServerPort: 0,
		Proxy:                   "",
		Paused:                  !cluster.Spec.Active,
	},
}
// proxy service name if needed
serviceName := fmt.Sprintf("mc-%s", cluster.Name)

agent, err := c.agentLister.Get(name)
if err != nil && errors.IsNotFound(err) {
	agent, err = c.agentClient.Create(newAgent)
	if err != nil {
		klog.Errorf("Failed to create agent %s, %#v", name, err)
		return err
if cluster.ObjectMeta.DeletionTimestamp.IsZero() {
	// The object is not being deleted, so if it does not have our finalizer,
	// then let's add the finalizer and update the object. This is equivalent
	// to registering our finalizer.
	if !sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) {
		cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1alpha1.Finalizer)
		if cluster, err = c.clusterClient.Update(cluster); err != nil {
			return err
		}
	}
} else {
	// The object is being deleted
	if sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) {
		// need to unJoin federation first, because there is
		// some cleanup work to do in the member cluster which depends
		// on the agent to proxy traffic
		err = c.unJoinFederation(nil, name)
		if err != nil {
			klog.Errorf("Failed to unjoin federation for cluster %s, error %v", name, err)
			return err
		}

		_, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
		if err != nil {
			if errors.IsNotFound(err) {
				// nothing to do
			} else {
				klog.Errorf("Failed to get proxy service %s, error %v", serviceName, err)
				return err
			}
		} else {
			err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(serviceName, metav1.NewDeleteOptions(0))
			if err != nil {
				klog.Errorf("Unable to delete service %s, error %v", serviceName, err)
				return err
			}
		}

		finalizers := sets.NewString(cluster.ObjectMeta.Finalizers...)
		finalizers.Delete(clusterv1alpha1.Finalizer)
		cluster.ObjectMeta.Finalizers = finalizers.List()
		if _, err = c.clusterClient.Update(cluster); err != nil {
			return err
		}
	}
	return nil
}

oldCluster := cluster.DeepCopy()

// prepare for proxy to member cluster
if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
	if cluster.Spec.Connection.KubeSphereAPIServerPort == 0 ||
		cluster.Spec.Connection.KubernetesAPIServerPort == 0 {
		port, err := c.allocatePort()
		if err != nil {
			klog.Error(err)
			return err
		}

		cluster.Spec.Connection.KubernetesAPIServerPort = port
		cluster.Spec.Connection.KubeSphereAPIServerPort = port + 10000
	}

	// token uninitialized, generate a new token
	if len(cluster.Spec.Connection.Token) == 0 {
		cluster.Spec.Connection.Token = c.generateToken()
	}

	mcService := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serviceName,
			Namespace: cluster.Namespace,
			Labels: map[string]string{
				"app.kubernetes.io/name": serviceName,
				"app":                    serviceName,
			},
		},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{
				"app.kubernetes.io/name": "tower",
				"app":                    "tower",
			},
			Ports: []v1.ServicePort{
				{
					Name:       "kubernetes",
					Protocol:   v1.ProtocolTCP,
					Port:       kubernetesPort,
					TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubernetesAPIServerPort)),
				},
				{
					Name:       "kubesphere",
					Protocol:   v1.ProtocolTCP,
					Port:       kubespherePort,
					TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubeSphereAPIServerPort)),
				},
			},
		},
	}

	service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(&mcService)
			if err != nil {
				return err
			}
		}

		return err
	} else {
		if !reflect.DeepEqual(service.Spec, mcService.Spec) {
			mcService.ObjectMeta = service.ObjectMeta
			mcService.Spec.ClusterIP = service.Spec.ClusterIP

			service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(&mcService)
			if err != nil {
				return err
			}
		}
	}

	// populate the kubernetes apiEndpoint and kubesphere apiEndpoint
	cluster.Spec.Connection.KubernetesAPIEndpoint = fmt.Sprintf("https://%s:%d", service.Spec.ClusterIP, kubernetesPort)
	cluster.Spec.Connection.KubeSphereAPIEndpoint = fmt.Sprintf("http://%s:%d", service.Spec.ClusterIP, kubespherePort)

	if !reflect.DeepEqual(oldCluster.Spec, cluster.Spec) {
		cluster, err = c.clusterClient.Update(cluster)
		if err != nil {
			klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
			return err
		}
		return nil
	}
}

if len(cluster.Spec.Connection.KubeConfig) == 0 {
	return nil
}

var clientSet kubernetes.Interface
var clusterConfig *rest.Config

// prepare for
clientConfig, err := clientcmd.NewClientConfigFromBytes(cluster.Spec.Connection.KubeConfig)
if err != nil {
	klog.Errorf("Failed to get agent %s, %#v", name, err)
	klog.Errorf("Unable to create client config from kubeconfig bytes, %#v", err)
	return err
}

if agent.Spec.Paused != newAgent.Spec.Paused {
	agent.Spec.Paused = newAgent.Spec.Paused
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		_, err = c.agentClient.Update(agent)
		return err
	})
clusterConfig, err = clientConfig.ClientConfig()
if err != nil {
	klog.Errorf("Failed to get client config, %#v", err)
	return err
}

// agent connection is ready, update cluster status
// set
if len(agent.Status.KubeConfig) != 0 && c.isAgentReady(agent) {
	clientConfig, err := clientcmd.NewClientConfigFromBytes(agent.Status.KubeConfig)
clientSet, err = kubernetes.NewForConfig(clusterConfig)
if err != nil {
	klog.Errorf("Failed to create ClientSet from config, %#v", err)
	return nil
}

if !cluster.Spec.JoinFederation { // trying to unJoin federation
	err = c.unJoinFederation(clusterConfig, cluster.Name)
	if err != nil {
		klog.Errorf("Unable to create client config from kubeconfig bytes, %#v", err)
		klog.Errorf("Failed to unJoin federation for cluster %s, error %v", cluster.Name, err)
		c.eventRecorder.Event(cluster, v1.EventTypeWarning, "UnJoinFederation", err.Error())
		return err
	}

	config, err := clientConfig.ClientConfig()
} else { // join federation
	_, err = c.joinFederation(clusterConfig, cluster.Name, cluster.Labels)
	if err != nil {
		klog.Errorf("Failed to get client config, %#v", err)
		klog.Errorf("Failed to join federation for cluster %s, error %v", cluster.Name, err)
		c.eventRecorder.Event(cluster, v1.EventTypeWarning, "JoinFederation", err.Error())
		return err
	}
	c.eventRecorder.Event(cluster, v1.EventTypeNormal, "JoinFederation", "Cluster has joined federation.")

	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		klog.Errorf("Failed to create ClientSet from config, %#v", err)
		return nil
	federationReadyCondition := clusterv1alpha1.ClusterCondition{
		Type:               clusterv1alpha1.ClusterFederated,
		Status:             v1.ConditionTrue,
		LastUpdateTime:     metav1.Now(),
		LastTransitionTime: metav1.Now(),
		Reason:             "",
		Message:            "Cluster has joined federation control plane successfully",
	}

	c.updateClusterCondition(cluster, federationReadyCondition)
}

// cluster agent is ready, we can pull kubernetes cluster info through the agent;
// since no agent is necessary for the host cluster, updates for the host cluster
// are safe.
if isConditionTrue(cluster, clusterv1alpha1.ClusterAgentAvailable) ||
	cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeDirect {
	version, err := clientSet.Discovery().ServerVersion()
	if err != nil {
		klog.Errorf("Failed to get kubernetes version, %#v", err)
@@ -252,28 +393,25 @@ func (c *ClusterController) syncCluster(key string) error {
	}

	cluster.Status.NodeCount = len(nodes.Items)

	clusterReadyCondition := clusterv1alpha1.ClusterCondition{
		Type:               clusterv1alpha1.ClusterReady,
		Status:             v1.ConditionTrue,
		LastUpdateTime:     metav1.Now(),
		LastTransitionTime: metav1.Now(),
		Reason:             string(clusterv1alpha1.ClusterReady),
		Message:            "Cluster is available now",
	}

	c.updateClusterCondition(cluster, clusterReadyCondition)
}

agentReadyCondition := clusterv1alpha1.ClusterCondition{
	Type:               clusterv1alpha1.ClusterAgentAvailable,
	LastUpdateTime:     metav1.NewTime(time.Now()),
	LastTransitionTime: metav1.NewTime(time.Now()),
	Reason:             "",
	Message:            "Cluster agent is available now.",
}

if c.isAgentReady(agent) {
	agentReadyCondition.Status = v1.ConditionTrue
} else {
	agentReadyCondition.Status = v1.ConditionFalse
}

c.addClusterCondition(cluster, agentReadyCondition)

_, err = c.clusterClient.Update(cluster)
if err != nil {
	klog.Errorf("Failed to update cluster status, %#v", err)
	return err
if !reflect.DeepEqual(oldCluster, cluster) {
	_, err = c.clusterClient.Update(cluster)
	if err != nil {
		klog.Errorf("Failed to update cluster status, %#v", err)
		return err
	}
}

return nil
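The add/remove dance at the top of syncCluster is the standard Kubernetes finalizer pattern: clusterv1alpha1.Finalizer guards the cleanup (federation unjoin, proxy service deletion) that must run before the Cluster object disappears. A condensed sketch of that pattern, using the same identifiers as the code above; the cleanup helper is hypothetical shorthand for the steps spelled out in syncCluster:

// ensureFinalizer condenses the finalizer handling from syncCluster.
func (c *ClusterController) ensureFinalizer(cluster *clusterv1alpha1.Cluster) error {
	if cluster.ObjectMeta.DeletionTimestamp.IsZero() {
		// Not being deleted: register our finalizer so deletion waits for cleanup.
		if !sets.NewString(cluster.Finalizers...).Has(clusterv1alpha1.Finalizer) {
			cluster.Finalizers = append(cluster.Finalizers, clusterv1alpha1.Finalizer)
			_, err := c.clusterClient.Update(cluster)
			return err
		}
		return nil
	}
	// Being deleted: run cleanup, then drop the finalizer so deletion proceeds.
	if sets.NewString(cluster.Finalizers...).Has(clusterv1alpha1.Finalizer) {
		if err := c.cleanup(cluster); err != nil { // hypothetical helper
			return err
		}
		finalizers := sets.NewString(cluster.Finalizers...)
		finalizers.Delete(clusterv1alpha1.Finalizer)
		cluster.Finalizers = finalizers.List()
		_, err := c.clusterClient.Update(cluster)
		return err
	}
	return nil
}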
@@ -298,50 +436,126 @@ func (c *ClusterController) handleErr(err error, key interface{}) {
	}

	if c.queue.NumRequeues(key) < maxRetries {
		klog.V(2).Infof("Error syncing virtualservice %s for service retrying, %#v", key, err)
		klog.V(2).Infof("Error syncing cluster %s, retrying, %v", key, err)
		c.queue.AddRateLimited(key)
		return
	}

	klog.V(4).Infof("Dropping service %s out of the queue.", key)
	klog.V(4).Infof("Dropping cluster %s out of the queue.", key)
	c.queue.Forget(key)
	utilruntime.HandleError(err)
}

func (c *ClusterController) addAgent(obj interface{}) {
	agent := obj.(*clusterv1alpha1.Agent)
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("get agent key %s failed", agent.Name))
		return
	}

	c.queue.Add(key)
}

func (c *ClusterController) isAgentReady(agent *clusterv1alpha1.Agent) bool {
	for _, condition := range agent.Status.Conditions {
		if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == v1.ConditionTrue {
func isConditionTrue(cluster *clusterv1alpha1.Cluster, conditionType clusterv1alpha1.ClusterConditionType) bool {
	for _, condition := range cluster.Status.Conditions {
		if condition.Type == conditionType && condition.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

// addClusterCondition add condition
func (c *ClusterController) addClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) {
// updateClusterCondition updates the matching condition in the cluster's
// conditions using the given condition, and adds it if not present
func (c *ClusterController) updateClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) {
	if cluster.Status.Conditions == nil {
		cluster.Status.Conditions = make([]clusterv1alpha1.ClusterCondition, 0)
	}

	newConditions := make([]clusterv1alpha1.ClusterCondition, 0)
	needToUpdate := true
	for _, cond := range cluster.Status.Conditions {
		if cond.Type == condition.Type {
			continue
			if cond.Status == condition.Status {
				needToUpdate = false
				continue
			} else {
				newConditions = append(newConditions, cond)
			}
		}
		newConditions = append(newConditions, cond)
	}

	newConditions = append(newConditions, condition)
	cluster.Status.Conditions = newConditions
	if needToUpdate {
		newConditions = append(newConditions, condition)
		cluster.Status.Conditions = newConditions
	}
}

func isHostCluster(cluster *clusterv1alpha1.Cluster) bool {
	for k, v := range cluster.Annotations {
		if k == clusterv1alpha1.IsHostCluster && v == "true" {
			return true
		}
	}

	return false
}

// joinFederation joins a cluster into federation clusters.
// return nil error if kubefed cluster already exists.
func (c *ClusterController) joinFederation(clusterConfig *rest.Config, joiningClusterName string, labels map[string]string) (*fedv1b1.KubeFedCluster, error) {

	return joinClusterForNamespace(c.hostConfig,
		clusterConfig,
		kubefedNamespace,
		kubefedNamespace,
		hostClusterName,
		joiningClusterName,
		fmt.Sprintf("%s-secret", joiningClusterName),
		labels,
		apiextv1b1.ClusterScoped,
		false,
		false)
}

// unJoinFederation unjoins a cluster from federation control plane.
func (c *ClusterController) unJoinFederation(clusterConfig *rest.Config, unjoiningClusterName string) error {
	return unjoinCluster(c.hostConfig,
		clusterConfig,
		kubefedNamespace,
		hostClusterName,
		unjoiningClusterName,
		true,
		false)
}

// allocatePort finds an available port in [portRangeMin, portRangeMax] within maximumRetries attempts
// TODO: only works with a handful of clusters
func (c *ClusterController) allocatePort() (uint16, error) {
	rand.Seed(time.Now().UnixNano())

	clusters, err := c.clusterLister.List(labels.Everything())
	if err != nil {
		return 0, err
	}

	const maximumRetries = 10
	for i := 0; i < maximumRetries; i++ {
		collision := false
		port := uint16(portRangeMin + rand.Intn(portRangeMax-portRangeMin+1))

		for _, item := range clusters {
			if item.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy &&
				item.Spec.Connection.KubernetesAPIServerPort != 0 &&
				item.Spec.Connection.KubernetesAPIServerPort == port {
				collision = true
				break
			}
		}

		if !collision {
			return port, nil
		}
	}

	return 0, fmt.Errorf("unable to allocate port after %d retries", maximumRetries)
}

// generateToken returns the hex encoding of 32 random bytes as the agent token
func (c *ClusterController) generateToken() string {
	rand.Seed(time.Now().UnixNano())
	b := make([]byte, 32)
	rand.Read(b)
	return fmt.Sprintf("%x", b)
}

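Note that generateToken draws from math/rand, which is deterministic and not intended for secrets. If a cryptographically strong token were wanted, a crypto/rand variant would look like the sketch below; this is an alternative for illustration, not what the commit ships:

import (
	cryptorand "crypto/rand"
	"encoding/hex"
)

// generateSecureToken returns the hex encoding of 32 cryptographically
// random bytes, using crypto/rand instead of the seeded math/rand above.
func generateSecureToken() (string, error) {
	b := make([]byte, 32)
	if _, err := cryptorand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}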
1 pkg/controller/cluster/helper.go Normal file
@@ -0,0 +1 @@
package cluster

720 pkg/controller/cluster/join.go Normal file
@@ -0,0 +1,720 @@
package cluster

import (
	"context"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	kubeclient "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/klog"
	"reflect"
	"sigs.k8s.io/kubefed/pkg/kubefedctl/util"
	"time"

	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
	genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
)

var (
	// Policy rules allowing full access to resources in the cluster
	// or namespace.
	namespacedPolicyRules = []rbacv1.PolicyRule{
		{
			Verbs:     []string{rbacv1.VerbAll},
			APIGroups: []string{rbacv1.APIGroupAll},
			Resources: []string{rbacv1.ResourceAll},
		},
	}
	clusterPolicyRules = []rbacv1.PolicyRule{
		namespacedPolicyRules[0],
		{
			NonResourceURLs: []string{rbacv1.NonResourceAll},
			Verbs:           []string{"get"},
		},
	}
)

const (
	tokenKey                    = "token"
	serviceAccountSecretTimeout = 30 * time.Second
)

// joinClusterForNamespace registers a cluster with a KubeFed control
// plane. The KubeFed namespace in the joining cluster is provided by
// the joiningNamespace parameter.
func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace,
	joiningNamespace, hostClusterName, joiningClusterName, secretName string, labels map[string]string,
	scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {

	hostClientset, err := HostClientset(hostConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
		return nil, err
	}

	clusterClientset, err := ClusterClientset(clusterConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get joining cluster clientset: %v", err)
		return nil, err
	}

	client, err := genericclient.New(hostConfig)
	if err != nil {
		klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
		return nil, err
	}

	klog.V(2).Infof("Performing preflight checks.")
	err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting)
	if err != nil {
		return nil, err
	}

	klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace)
	_, err = createKubeFedNamespace(clusterClientset, joiningNamespace, joiningClusterName, dryRun)
	if err != nil {
		klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", joiningNamespace, err)
		return nil, err
	}
	klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace)

	saName, err := createAuthorizedServiceAccount(clusterClientset, joiningNamespace, joiningClusterName, hostClusterName, scope, dryRun, errorOnExisting)
	if err != nil {
		return nil, err
	}

	secret, _, err := populateSecretInHostCluster(clusterClientset, hostClientset,
		saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun)
	if err != nil {
		klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err)
		return nil, err
	}

	var disabledTLSValidations []fedv1b1.TLSValidation
	if clusterConfig.TLSClientConfig.Insecure {
		disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll)
	}

	kubefedCluster, err := createKubeFedCluster(client, joiningClusterName, clusterConfig.Host,
		secret.Name, kubefedNamespace, clusterConfig.CAData, disabledTLSValidations, labels, dryRun, errorOnExisting)
	if err != nil {
		klog.V(2).Infof("Failed to create federated cluster resource: %v", err)
		return nil, err
	}

	klog.V(2).Info("Created federated cluster resource")
	return kubefedCluster, nil
}

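The controller's joinFederation wrapper shown earlier drives this function with both kubefed namespaces set to "kube-federation-system". A sketch of a dry-run call in the same package; the member cluster name is illustrative:

// dryRunJoin builds the join objects without creating anything, mirroring the
// argument order joinFederation uses above.
func dryRunJoin(hostConfig, memberConfig *rest.Config) (*fedv1b1.KubeFedCluster, error) {
	return joinClusterForNamespace(hostConfig, memberConfig,
		kubefedNamespace, kubefedNamespace,
		hostClusterName, "member-cluster",
		"member-cluster-secret", nil,
		apiextv1b1.ClusterScoped,
		true,  // dryRun: validate and assemble only
		false, // errorOnExisting
	)
}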
// performPreflightChecks checks that the host and joining clusters are in
|
||||
// a consistent state.
|
||||
func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName,
|
||||
kubefedNamespace string, errorOnExisting bool) error {
|
||||
// Make sure there is no existing service account in the joining cluster.
|
||||
saName := util.ClusterServiceAccountName(name, hostClusterName)
|
||||
_, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(saName, metav1.GetOptions{})
|
||||
|
||||
switch {
|
||||
case apierrors.IsNotFound(err):
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
case errorOnExisting:
|
||||
return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name)
|
||||
default:
|
||||
klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// createKubeFedCluster creates a federated cluster resource that associates
|
||||
// the cluster and secret.
|
||||
func createKubeFedCluster(client genericclient.Client, joiningClusterName, apiEndpoint,
|
||||
secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation,
|
||||
labels map[string]string, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) {
|
||||
fedCluster := &fedv1b1.KubeFedCluster{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: kubefedNamespace,
|
||||
Name: joiningClusterName,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: fedv1b1.KubeFedClusterSpec{
|
||||
APIEndpoint: apiEndpoint,
|
||||
CABundle: caBundle,
|
||||
SecretRef: fedv1b1.LocalSecretReference{
|
||||
Name: secretName,
|
||||
},
|
||||
DisabledTLSValidations: disabledTLSValidations,
|
||||
},
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return fedCluster, nil
|
||||
}
|
||||
|
||||
existingFedCluster := &fedv1b1.KubeFedCluster{}
|
||||
err := client.Get(context.TODO(), existingFedCluster, kubefedNamespace, joiningClusterName)
|
||||
switch {
|
||||
case err != nil && !apierrors.IsNotFound(err):
|
||||
klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err)
|
||||
return nil, err
|
||||
case err == nil && errorOnExisting:
|
||||
return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName)
|
||||
case err == nil:
|
||||
existingFedCluster.Spec = fedCluster.Spec
|
||||
existingFedCluster.Labels = labels
|
||||
err = client.Update(context.TODO(), existingFedCluster)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
return existingFedCluster, nil
|
||||
default:
|
||||
err = client.Create(context.TODO(), fedCluster)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
return fedCluster, nil
|
||||
}
|
||||
}
|
||||
|
||||
// createKubeFedNamespace creates the kubefed namespace in the cluster
|
||||
// associated with clusterClientset, if it doesn't already exist.
|
||||
func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace,
|
||||
joiningClusterName string, dryRun bool) (*corev1.Namespace, error) {
|
||||
fedNamespace := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: kubefedNamespace,
|
||||
},
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return fedNamespace, nil
|
||||
}
|
||||
|
||||
_, err := clusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
klog.V(2).Infof("Already existing %s namespace", kubefedNamespace)
|
||||
return fedNamespace, nil
|
||||
}
|
||||
|
||||
// Not found, so create.
|
||||
_, err = clusterClientset.CoreV1().Namespaces().Create(fedNamespace)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, err)
|
||||
return nil, err
|
||||
}
|
||||
return fedNamespace, nil
|
||||
}
|
||||
|
||||
// createAuthorizedServiceAccount creates a service account and grants
|
||||
// the privileges required by the KubeFed control plane to manage
|
||||
// resources in the joining cluster. The name of the created service
|
||||
// account is returned on success.
|
||||
func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface,
|
||||
namespace, joiningClusterName, hostClusterName string,
|
||||
scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (string, error) {
|
||||
|
||||
klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName)
|
||||
|
||||
saName, err := createServiceAccount(joiningClusterClientset, namespace,
|
||||
joiningClusterName, hostClusterName, dryRun, errorOnExisting)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v",
|
||||
saName, joiningClusterName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName)
|
||||
|
||||
if scope == apiextv1b1.NamespaceScoped {
|
||||
klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
|
||||
|
||||
err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s",
|
||||
saName, joiningClusterName)
|
||||
|
||||
klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
|
||||
|
||||
err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName,
|
||||
dryRun, errorOnExisting)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v",
|
||||
saName, joiningClusterName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s",
|
||||
saName, joiningClusterName)
|
||||
|
||||
} else {
|
||||
klog.V(2).Infof("Creating cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName)
|
||||
|
||||
err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v",
|
||||
saName, joiningClusterName, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s",
|
||||
saName, joiningClusterName)
|
||||
}
|
||||
|
||||
return saName, nil
|
||||
}
|
||||
|
||||
// createServiceAccount creates a service account in the cluster associated
|
||||
// with clusterClientset with credentials that will be used by the host cluster
|
||||
// to access its API server.
|
||||
func createServiceAccount(clusterClientset kubeclient.Interface, namespace,
|
||||
joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) {
|
||||
saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName)
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: saName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return saName, nil
|
||||
}
|
||||
|
||||
// Create a new service account.
|
||||
_, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Create(sa)
|
||||
switch {
|
||||
case apierrors.IsAlreadyExists(err) && errorOnExisting:
|
||||
klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
|
||||
return "", err
|
||||
case err != nil && !apierrors.IsAlreadyExists(err):
|
||||
klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
|
||||
return "", err
|
||||
default:
|
||||
return saName, nil
|
||||
}
|
||||
}
|
||||
|
||||
func bindingSubjects(saName, namespace string) []rbacv1.Subject {
|
||||
return []rbacv1.Subject{
|
||||
{
|
||||
Kind: rbacv1.ServiceAccountKind,
|
||||
Name: saName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// createClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access all resources in all namespaces in the cluster associated
// with clientset.
func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.RoleName(saName)

    role := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: clusterPolicyRules,
    }
    existingRole, err := clientset.RbacV1().ClusterRoles().Get(roleName, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err := clientset.RbacV1().ClusterRoles().Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default: // role was not found
        _, err := clientset.RbacV1().ClusterRoles().Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    // TODO: This should limit its access to only necessary resources.
    binding := &rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "ClusterRole",
            Name:     roleName,
        },
    }
    existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }
    return nil
}

// createRoleAndBinding creates an RBAC role and binding
// that allows the service account identified by saName to access all
// resources in the specified namespace.
func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.RoleName(saName)

    role := &rbacv1.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: namespacedPolicyRules,
    }
    existingRole, err := clientset.RbacV1().Roles(namespace).Get(roleName, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err)
        return err
    case errorOnExisting && err == nil:
        return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err = clientset.RbacV1().Roles(namespace).Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default:
        _, err := clientset.RbacV1().Roles(namespace).Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    binding := &rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "Role",
            Name:     roleName,
        },
    }

    existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().RoleBindings(namespace).Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err = clientset.RbacV1().RoleBindings(namespace).Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    return nil
}

// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and
// binding that allows the service account identified by saName to
// access the health check path of the cluster.
func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error {
    if dryRun {
        return nil
    }

    roleName := util.HealthCheckRoleName(saName, namespace)

    role := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Rules: []rbacv1.PolicyRule{
            {
                Verbs:           []string{"get"},
                NonResourceURLs: []string{"/healthz"},
            },
            // The cluster client expects to be able to list nodes to retrieve zone and region details.
            // TODO(marun) Consider making zone/region retrieval optional
            {
                Verbs:     []string{"list"},
                APIGroups: []string{""},
                Resources: []string{"nodes"},
            },
        },
    }
    existingRole, err := clientset.RbacV1().ClusterRoles().Get(role.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        existingRole.Rules = role.Rules
        _, err := clientset.RbacV1().ClusterRoles().Update(existingRole)
        if err != nil {
            klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    default: // role was not found
        _, err := clientset.RbacV1().ClusterRoles().Create(role)
        if err != nil {
            klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }

    binding := &rbacv1.ClusterRoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: roleName,
        },
        Subjects: bindingSubjects(saName, namespace),
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     "ClusterRole",
            Name:     roleName,
        },
    }
    existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{})
    switch {
    case err != nil && !apierrors.IsNotFound(err):
        klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v",
            saName, clusterName, err)
        return err
    case err == nil && errorOnExisting:
        return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName)
    case err == nil:
        // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding
        // must be deleted and recreated with the correct roleRef
        if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) {
            err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{})
            if err != nil {
                klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v",
                    saName, clusterName, err)
                return err
            }
            _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
            if err != nil {
                klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        } else {
            existingBinding.Subjects = binding.Subjects
            _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding)
            if err != nil {
                klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                    saName, clusterName, err)
                return err
            }
        }
    default:
        _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding)
        if err != nil {
            klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v",
                saName, clusterName, err)
            return err
        }
    }
    return nil
}

// populateSecretInHostCluster copies the service account secret for saName
// from the cluster referenced by clusterClientset to the client referenced by
// hostClientset, putting it in a secret named secretName in the provided
// namespace.
func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface,
    saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string,
    dryRun bool) (*corev1.Secret, []byte, error) {

    klog.V(2).Infof("Creating cluster credentials secret in host cluster")

    if dryRun {
        dryRunSecret := &corev1.Secret{}
        dryRunSecret.Name = secretName
        return dryRunSecret, nil, nil
    }

    // Get the secret from the joining cluster.
    var secret *corev1.Secret
    err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) {
        sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(saName,
            metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        for _, objReference := range sa.Secrets {
            saSecretName := objReference.Name
            var err error
            secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(saSecretName, metav1.GetOptions{})
            if err != nil {
                return false, nil
            }
            if secret.Type == corev1.SecretTypeServiceAccountToken {
                klog.V(2).Infof("Using secret named: %s", secret.Name)
                return true, nil
            }
        }
        return false, nil
    })

    if err != nil {
        klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err)
        return nil, nil, err
    }

    token, ok := secret.Data[tokenKey]
    if !ok {
        return nil, nil, errors.Errorf("Key %q not found in service account secret", tokenKey)
    }

    // Create a secret in the host cluster containing the token.
    v1Secret := corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: hostNamespace,
        },
        Data: map[string][]byte{
            tokenKey: token,
        },
    }

    if secretName == "" {
        v1Secret.GenerateName = joiningClusterName + "-"
    } else {
        v1Secret.Name = secretName
    }

    var v1SecretResult *corev1.Secret
    _, err = hostClientset.CoreV1().Secrets(hostNamespace).Get(v1Secret.Name, metav1.GetOptions{})
    if err != nil {
        if !apierrors.IsNotFound(err) {
            klog.V(2).Infof("Could not get secret %s in host cluster: %v", v1Secret.Name, err)
            return nil, nil, err
        }
        v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Create(&v1Secret)
        if err != nil {
            klog.V(2).Infof("Could not create secret in host cluster: %v", err)
            return nil, nil, err
        }
    } else {
        v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Update(&v1Secret)
        if err != nil {
            klog.V(2).Infof("Update secret %s in host cluster failed: %v", v1Secret.Name, err)
            return nil, nil, err
        }
    }

    // caBundle is optional so no error is raised if it is not
    // found in the secret.
    caBundle := secret.Data["ca.crt"]

    klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name)
    return v1SecretResult, caBundle, nil
}
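A short sketch of how a caller might consume the secret and CA bundle returned above to build a rest.Config for the member cluster; apiEndpoint is an assumed variable, and this mirrors, rather than quotes, the kubefed join flow:

// Sketch only: turn the copied token secret into a client config.
secret, caBundle, err := populateSecretInHostCluster(clusterClientset, hostClientset,
    saName, hostNamespace, joiningNamespace, joiningClusterName, secretName, dryRun)
if err != nil {
    return err
}
memberConfig := &rest.Config{
    Host:            apiEndpoint, // assumed: the member cluster's API server URL
    BearerToken:     string(secret.Data[tokenKey]),
    TLSClientConfig: rest.TLSClientConfig{CAData: caBundle},
}
_ = memberConfig
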
pkg/controller/cluster/unjoin.go (new file, 296 lines)
@@ -0,0 +1,296 @@
package cluster

import (
    "context"

    "github.com/pkg/errors"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kubeclient "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog"
    fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
    genericclient "sigs.k8s.io/kubefed/pkg/client/generic"
    "sigs.k8s.io/kubefed/pkg/kubefedctl/util"
)

// Following code copied from sigs.k8s.io/kubefed to avoid import collision

// unjoinCluster performs all the necessary steps to remove the
// registration of a cluster from a KubeFed control plane provided the
// required set of parameters are passed in.
func unjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, unjoiningClusterName string, forceDeletion, dryRun bool) error {

    hostClientset, err := util.HostClientset(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get host cluster clientset: %v", err)
        return err
    }

    var clusterClientset *kubeclient.Clientset
    if clusterConfig != nil {
        clusterClientset, err = util.ClusterClientset(clusterConfig)
        if err != nil {
            klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err)
            if !forceDeletion {
                return err
            }
        }
    }

    client, err := genericclient.New(hostConfig)
    if err != nil {
        klog.V(2).Infof("Failed to get kubefed clientset: %v", err)
        return err
    }

    if clusterClientset != nil {
        err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun)
        if err != nil {
            if !forceDeletion {
                return err
            }
            klog.V(2).Infof("Failed to delete RBAC resources: %v", err)
        }

        err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun)
        if err != nil {
            if !forceDeletion {
                return err
            }
            klog.V(2).Infof("Failed to delete kubefed namespace: %v", err)
        }
    }

    // Deregister the cluster from the host once the RBAC resources and the
    // kubefed namespace have been cleaned up (or force deletion is set).
    err = deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun)
    if err != nil {
        return err
    }
    return nil
}

// deleteFederatedClusterAndSecret deletes a federated cluster resource that associates
// the cluster and secret.
func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client,
    kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
    if dryRun {
        return nil
    }

    klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q",
        kubefedNamespace, unjoiningClusterName)

    fedCluster := &fedv1b1.KubeFedCluster{}
    err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName)
    if err != nil {
        if apierrors.IsNotFound(err) {
            return nil
        }
        return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName)
    }

    err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(fedCluster.Spec.SecretRef.Name,
        &metav1.DeleteOptions{})
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name)
    } else if err != nil {
        wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q",
            kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
        if !forceDeletion {
            return wrappedErr
        }
        klog.V(2).Infof("%v", wrappedErr)
    } else {
        klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName)
    }

    err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name)
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name)
    } else if err != nil {
        wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
        if !forceDeletion {
            return wrappedErr
        }
        klog.V(2).Infof("%v", wrappedErr)
    } else {
        klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName)
    }

    return nil
}

// deleteRBACResources deletes the cluster role, cluster rolebindings and service account
// from the unjoining cluster.
func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface,
    namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error {

    saName := ClusterServiceAccountName(unjoiningClusterName, hostClusterName)

    err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun)
    if err != nil {
        return err
    }

    err = deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun)
    if err != nil {
        return err
    }

    return nil
}

// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from
// the unjoining cluster so long as the unjoining cluster is not the
// host cluster.
func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface,
    kubefedNamespace, unjoiningClusterName string, dryRun bool) error {

    if dryRun {
        return nil
    }

    hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
    if err != nil {
        return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace)
    }

    unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{})
    if err != nil {
        return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
    }

    if IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) {
        klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace)
        return nil
    }

    klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
    err = unjoiningClusterClientset.CoreV1().Namespaces().Delete(kubefedNamespace, &metav1.DeleteOptions{})
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
        return nil
    } else if err != nil {
        return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName)
    } else {
        klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName)
    }

    return nil
}

// deleteServiceAccount deletes a service account in the cluster associated
// with clusterClientset with credentials that are used by the host cluster
// to access its API server.
func deleteServiceAccount(clusterClientset kubeclient.Interface, saName,
    namespace, unjoiningClusterName string, dryRun bool) error {
    if dryRun {
        return nil
    }

    klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)

    // Delete a service account.
    err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(saName,
        &metav1.DeleteOptions{})
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName)
    } else if err != nil {
        return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName)
    } else {
        klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName)
    }

    return nil
}

// deleteClusterRoleAndBinding deletes an RBAC cluster role and binding that
// allows the service account identified by saName to access all resources in
// all namespaces in the cluster associated with clusterClientset.
func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface,
    saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error {
    if dryRun {
        return nil
    }

    roleName := util.RoleName(saName)
    healthCheckRoleName := util.HealthCheckRoleName(saName, namespace)

    // Attempt to delete all roles and role bindings created by join.
    for _, name := range []string{roleName, healthCheckRoleName} {
        klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.",
            name, saName, unjoiningClusterName)

        err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{})
        if apierrors.IsNotFound(err) {
            klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.",
                name, saName, unjoiningClusterName)
        } else if err != nil {
            wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q",
                name, saName, unjoiningClusterName)
            if !forceDeletion {
                return wrappedErr
            }
            klog.V(2).Infof("%v", wrappedErr)
        } else {
            klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.",
                name, saName, unjoiningClusterName)
        }

        klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.",
            name, saName, unjoiningClusterName)
        err = clusterClientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{})
        if apierrors.IsNotFound(err) {
            klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.",
                name, saName, unjoiningClusterName)
        } else if err != nil {
            wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q",
                name, saName, unjoiningClusterName)
            if !forceDeletion {
                return wrappedErr
            }
            klog.V(2).Infof("%v", wrappedErr)
        } else {
            klog.V(2).Infof("Deleted cluster role %q for service account %q in unjoining cluster %q.",
                name, saName, unjoiningClusterName)
        }
    }

    klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
        namespace, roleName, saName, unjoiningClusterName)
    err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(roleName, &metav1.DeleteOptions{})
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
            namespace, roleName, saName, unjoiningClusterName)
    } else if err != nil {
        wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q",
            namespace, roleName, saName, unjoiningClusterName)
        if !forceDeletion {
            return wrappedErr
        }
        klog.V(2).Infof("%v", wrappedErr)
    } else {
        klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.",
            namespace, roleName, saName, unjoiningClusterName)
    }

    klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.",
        namespace, roleName, saName, unjoiningClusterName)
    err = clusterClientset.RbacV1().Roles(namespace).Delete(roleName, &metav1.DeleteOptions{})
    if apierrors.IsNotFound(err) {
        klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.",
            namespace, roleName, saName, unjoiningClusterName)
    } else if err != nil {
        wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q",
            namespace, roleName, saName, unjoiningClusterName)
        if !forceDeletion {
            return wrappedErr
        }
        klog.V(2).Infof("%v", wrappedErr)
    } else {
        klog.V(2).Infof("Deleted role \"%s/%s\" for service account %q in unjoining cluster %q.",
            namespace, roleName, saName, unjoiningClusterName)
    }

    return nil
}
pkg/controller/cluster/util.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package cluster

import (
    "fmt"
    "strings"

    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    pkgruntime "k8s.io/apimachinery/pkg/runtime"
    kubeclient "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// Default values for the federated group and version used by
// the enable and disable subcommands of `kubefedctl`.
const (
    DefaultFederatedGroup   = "types.kubefed.io"
    DefaultFederatedVersion = "v1beta1"

    FederatedKindPrefix = "Federated"
)

// FedConfig provides a rest config based on the filesystem kubeconfig (via
// pathOptions) and context in order to talk to the host kubernetes cluster
// and the joining kubernetes cluster.
type FedConfig interface {
    HostConfig(context, kubeconfigPath string) (*rest.Config, error)
    ClusterConfig(context, kubeconfigPath string) (*rest.Config, error)
    GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig
}

// fedConfig implements the FedConfig interface.
type fedConfig struct {
    pathOptions *clientcmd.PathOptions
}

// NewFedConfig creates a fedConfig for `kubefedctl` commands.
func NewFedConfig(pathOptions *clientcmd.PathOptions) FedConfig {
    return &fedConfig{
        pathOptions: pathOptions,
    }
}

// HostConfig provides a rest config to talk to the host kubernetes cluster
// based on the context and kubeconfig passed in.
func (a *fedConfig) HostConfig(context, kubeconfigPath string) (*rest.Config, error) {
    hostConfig := a.GetClientConfig(context, kubeconfigPath)
    hostClientConfig, err := hostConfig.ClientConfig()
    if err != nil {
        return nil, err
    }

    return hostClientConfig, nil
}

// ClusterConfig provides a rest config to talk to the joining kubernetes
// cluster based on the context and kubeconfig passed in.
func (a *fedConfig) ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) {
    clusterConfig := a.GetClientConfig(context, kubeconfigPath)
    clusterClientConfig, err := clusterConfig.ClientConfig()
    if err != nil {
        return nil, err
    }

    return clusterClientConfig, nil
}

// GetClientConfig is a helper method to create a client config from the
// context and kubeconfig passed as arguments.
func (a *fedConfig) GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig {
    loadingRules := *a.pathOptions.LoadingRules
    loadingRules.Precedence = a.pathOptions.GetLoadingPrecedence()
    loadingRules.ExplicitPath = kubeconfigPath
    overrides := &clientcmd.ConfigOverrides{
        CurrentContext: context,
    }

    return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, overrides)
}

// HostClientset provides a kubernetes API compliant clientset to
// communicate with the host cluster's kubernetes API server.
func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) {
    return kubeclient.NewForConfig(config)
}

// ClusterClientset provides a kubernetes API compliant clientset to
// communicate with the joining cluster's kubernetes API server.
func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) {
    return kubeclient.NewForConfig(config)
}

// ClusterServiceAccountName returns the name of a service account whose
// credentials are used by the host cluster to access the client cluster.
func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string {
    return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName)
}

// RoleName returns the name of a Role or ClusterRole and its
// associated RoleBinding or ClusterRoleBinding that are used to allow
// the service account to access necessary resources on the cluster.
func RoleName(serviceAccountName string) string {
    return fmt.Sprintf("kubefed-controller-manager:%s", serviceAccountName)
}

// HealthCheckRoleName returns the name of a ClusterRole and its
// associated ClusterRoleBinding that is used to allow the service
// account to check the health of the cluster and list nodes.
func HealthCheckRoleName(serviceAccountName, namespace string) string {
    return fmt.Sprintf("kubefed-controller-manager:%s:healthcheck-%s", namespace, serviceAccountName)
}

// IsFederatedAPIResource checks if a resource with the given Kind and group is a Federated one.
func IsFederatedAPIResource(kind, group string) bool {
    return strings.HasPrefix(kind, FederatedKindPrefix) && group == DefaultFederatedGroup
}

// GetNamespace returns the namespace of the current context.
func GetNamespace(hostClusterContext string, kubeconfig string, config FedConfig) (string, error) {
    clientConfig := config.GetClientConfig(hostClusterContext, kubeconfig)
    currentContext, err := CurrentContext(clientConfig)
    if err != nil {
        return "", err
    }

    ns, _, err := clientConfig.Namespace()
    if err != nil {
        return "", errors.Wrapf(err, "Failed to get ClientConfig for host cluster context %q and kubeconfig %q",
            currentContext, kubeconfig)
    }

    if len(ns) == 0 {
        ns = "default"
    }
    return ns, nil
}

// CurrentContext retrieves the current context from the provided config.
func CurrentContext(config clientcmd.ClientConfig) (string, error) {
    rawConfig, err := config.RawConfig()
    if err != nil {
        return "", errors.Wrap(err, "Failed to get current context from config")
    }
    return rawConfig.CurrentContext, nil
}

// IsPrimaryCluster checks if the caller is working with objects for the
// primary cluster by checking if the UIDs match for both ObjectMetas passed
// in.
// TODO (font): Need to revisit this when cluster ID is available.
func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
    meta := MetaAccessor(obj)
    clusterMeta := MetaAccessor(clusterObj)
    return meta.GetUID() == clusterMeta.GetUID()
}

func MetaAccessor(obj pkgruntime.Object) metav1.Object {
    accessor, err := meta.Accessor(obj)
    if err != nil {
        // This should always succeed if obj is not nil. Also,
        // adapters are slated for replacement by unstructured.
        return nil
    }
    return accessor
}
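As a concrete example, joining a cluster named member to a host named host produces the following derived names (worked out directly from the helpers above):

saName := ClusterServiceAccountName("member", "host") // "member-host"
_ = RoleName(saName)                                  // "kubefed-controller-manager:member-host"
_ = HealthCheckRoleName(saName, "kube-federation-system")
// "kubefed-controller-manager:kube-federation-system:healthcheck-member-host"
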
@@ -235,22 +235,33 @@ func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOpt
        if err != nil {
            return q, err
        }

        cts := ns.CreationTimestamp.Time
        if q.start.Before(cts) {
            q.start = cts
        }
        if q.end.Before(cts) {
            return q, errors.New(ErrNoHit)

        // Query should happen no earlier than namespace's creation time.
        // For range query, check and mutate `start`. For instant query, check and mutate `time`.
        // In range query, if `start` and `end` are both before namespace's creation time, it causes no hit.
        if !q.isRangeQuery() {
            if q.time.Before(cts) {
                q.time = cts
            }
        } else {
            if q.start.Before(cts) {
                q.start = cts
            }
            if q.end.Before(cts) {
                return q, errors.New(ErrNoHit)
            }
        }

    }

    // Parse sorting and paging params
    if r.target != "" {
        q.target = r.target
        q.page = DefaultPage
        q.limit = DefaultLimit
        if q.order != model.OrderAscending {
            r.order = DefaultOrder
        if r.order != model.OrderAscending {
            q.order = DefaultOrder
        }
        if r.page != "" {
            q.page, err = strconv.Atoi(r.page)

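The rule introduced above is easier to see in isolation: an instant query clamps its single evaluation time to the namespace's creation time, while a range query clamps start and rejects windows that end before the namespace existed. A standalone sketch with plain time.Time values standing in for the handler's queryOptions fields:

// clampToCreation mirrors the branch above; names and types are simplified.
func clampToCreation(isRange bool, ts, start, end, created time.Time) (time.Time, time.Time, error) {
    if !isRange {
        if ts.Before(created) {
            ts = created // instant query: never evaluate before the namespace existed
        }
        return ts, start, nil
    }
    if start.Before(created) {
        start = created
    }
    if end.Before(created) {
        return ts, start, errors.New("no hit") // the whole window predates the namespace
    }
    return ts, start, nil
}
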
@@ -2,12 +2,12 @@ package v1alpha3

import (
    "fmt"
    "github.com/google/go-cmp/cmp"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
    model "kubesphere.io/kubesphere/pkg/models/monitoring"
    "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
    "reflect"
    "testing"
    "time"
)
@@ -100,6 +100,32 @@ func TestParseRequestParams(t *testing.T) {
            },
            expectedErr: false,
        },
        {
            params: reqParams{
                time:          "1585830000",
                namespaceName: "default",
            },
            lvl: monitoring.LevelNamespace,
            namespace: corev1.Namespace{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "default",
                    CreationTimestamp: metav1.Time{
                        Time: time.Unix(1585836666, 0),
                    },
                },
            },
            expected: queryOptions{
                time:         time.Unix(1585836666, 0),
                identifier:   model.IdentifierNamespace,
                metricFilter: ".*",
                namedMetrics: model.NamespaceMetrics,
                option: monitoring.NamespaceOption{
                    ResourceFilter: ".*",
                    NamespaceName:  "default",
                },
            },
            expectedErr: false,
        },
        {
            params: reqParams{
                start: "1585830000",
@@ -151,6 +177,33 @@ func TestParseRequestParams(t *testing.T) {
            },
            expectedErr: false,
        },
        {
            params: reqParams{
                time:          "1585830000",
                workspaceName: "system-workspace",
                metricFilter:  "namespace_memory_usage_wo_cache|namespace_memory_limit_hard|namespace_cpu_usage",
                page:          "1",
                limit:         "10",
                order:         "desc",
                target:        "namespace_cpu_usage",
            },
            lvl: monitoring.LevelNamespace,
            expected: queryOptions{
                time:         time.Unix(1585830000, 0),
                metricFilter: "namespace_memory_usage_wo_cache|namespace_memory_limit_hard|namespace_cpu_usage",
                namedMetrics: model.NamespaceMetrics,
                option: monitoring.NamespaceOption{
                    ResourceFilter: ".*",
                    WorkspaceName:  "system-workspace",
                },
                target:     "namespace_cpu_usage",
                identifier: "namespace",
                order:      "desc",
                page:       1,
                limit:      10,
            },
            expectedErr: false,
        },
    }

    for i, tt := range tests {
@@ -170,8 +223,8 @@ func TestParseRequestParams(t *testing.T) {
                t.Fatalf("failed to catch error.")
            }

            if !reflect.DeepEqual(result, tt.expected) {
                t.Fatalf("unexpected return: %v.", result)
            if diff := cmp.Diff(result, tt.expected, cmp.AllowUnexported(result, tt.expected)); diff != "" {
                t.Fatalf("%T differ (-got, +want): %s", tt.expected, diff)
            }
        })
    }

@@ -1,15 +1,14 @@
package prometheus
package expressions

import (
    "fmt"
    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/storage/metric"
    "kubesphere.io/kubesphere/pkg/models/monitoring/expressions"
)

func init() {
    expressions.Register("prometheus", labelReplace)
    register("prometheus", labelReplace)
}

func labelReplace(input, ns string) (string, error) {
@@ -18,7 +17,7 @@ func labelReplace(input, ns string) (string, error) {
        return "", err
    }

    SetRecursive(root, ns)
    setRecursive(root, ns)
    if err != nil {
        return "", err
    }
@@ -27,39 +26,39 @@
}

// Inspired by https://github.com/openshift/prom-label-proxy
func SetRecursive(node promql.Node, namespace string) (err error) {
func setRecursive(node promql.Node, namespace string) (err error) {
    switch n := node.(type) {
    case *promql.EvalStmt:
        if err := SetRecursive(n.Expr, namespace); err != nil {
        if err := setRecursive(n.Expr, namespace); err != nil {
            return err
        }
    case promql.Expressions:
        for _, e := range n {
            if err := SetRecursive(e, namespace); err != nil {
            if err := setRecursive(e, namespace); err != nil {
                return err
            }
        }
    case *promql.AggregateExpr:
        if err := SetRecursive(n.Expr, namespace); err != nil {
        if err := setRecursive(n.Expr, namespace); err != nil {
            return err
        }
    case *promql.BinaryExpr:
        if err := SetRecursive(n.LHS, namespace); err != nil {
        if err := setRecursive(n.LHS, namespace); err != nil {
            return err
        }
        if err := SetRecursive(n.RHS, namespace); err != nil {
        if err := setRecursive(n.RHS, namespace); err != nil {
            return err
        }
    case *promql.Call:
        if err := SetRecursive(n.Args, namespace); err != nil {
        if err := setRecursive(n.Args, namespace); err != nil {
            return err
        }
    case *promql.ParenExpr:
        if err := SetRecursive(n.Expr, namespace); err != nil {
        if err := setRecursive(n.Expr, namespace); err != nil {
            return err
        }
    case *promql.UnaryExpr:
        if err := SetRecursive(n.Expr, namespace); err != nil {
        if err := setRecursive(n.Expr, namespace); err != nil {
            return err
        }
    case *promql.NumberLiteral, *promql.StringLiteral:
@@ -1,4 +1,4 @@
package prometheus
package expressions

import (
    "fmt"
@@ -4,6 +4,6 @@ type labelReplaceFn func(expr, ns string) (string, error)

var ReplaceNamespaceFns = make(map[string]labelReplaceFn)

func Register(name string, fn labelReplaceFn) {
func register(name string, fn labelReplaceFn) {
    ReplaceNamespaceFns[name] = fn
}

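After this rename the registry is package-internal: each backend self-registers in init(), and callers resolve the rewriter by name through the exported map. A sketch of the lookup side, assuming the package is imported as expressions:

fn, ok := expressions.ReplaceNamespaceFns["prometheus"]
if !ok {
    return "", fmt.Errorf("no expression rewriter registered for prometheus")
}
// Constrain an arbitrary user expression to one namespace, e.g.
// container_memory_usage_bytes -> container_memory_usage_bytes{namespace="default"}.
scoped, err := fn("container_memory_usage_bytes", "default")
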
@@ -10,21 +10,52 @@ import (

func TestSort(t *testing.T) {
    tests := []struct {
        name       string
        target     string
        order      string
        identifier string
        source     string
        raw        string
        expected   string
    }{
        {"sort in ascending order", "node_cpu_utilisation", "asc", "node", "source-node-metrics.json", "sorted-node-metrics-asc.json"},
        {"sort in descending order", "node_memory_utilisation", "desc", "node", "source-node-metrics.json", "sorted-node-metrics-desc.json"},
        {"sort faulty metrics", "node_memory_utilisation", "desc", "node", "faulty-node-metrics.json", "faulty-node-metrics-sorted.json"},
        {"sort metrics with an blank node", "node_memory_utilisation", "desc", "node", "blank-node-metrics.json", "blank-node-metrics-sorted.json"},
        {
            target:     "node_cpu_utilisation",
            order:      "asc",
            identifier: "node",
            raw:        "source-node-metrics.json",
            expected:   "sorted-node-metrics-asc.json",
        },
        {
            target:     "node_memory_utilisation",
            order:      "desc",
            identifier: "node",
            raw:        "source-node-metrics.json",
            expected:   "sorted-node-metrics-desc.json",
        },
        {
            target:     "node_memory_utilisation",
            order:      "desc",
            identifier: "node",
            raw:        "faulty-node-metrics.json",
            expected:   "faulty-node-metrics-sorted.json",
        },
        {
            target:     "node_memory_utilisation",
            order:      "desc",
            identifier: "node",
            raw:        "blank-node-metrics.json",
            expected:   "blank-node-metrics-sorted.json",
        },
        {
            target:     "node_memory_utilisation",
            order:      "desc",
            identifier: "node",
            raw:        "null-node-metrics.json",
            expected:   "null-node-metrics-sorted.json",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            source, expected, err := jsonFromFile(tt.source, tt.expected)

    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            source, expected, err := jsonFromFile(tt.raw, tt.expected)
            if err != nil {
                t.Fatal(err)
            }
@@ -39,21 +70,46 @@ func TestSort(t *testing.T) {

func TestPage(t *testing.T) {
    tests := []struct {
        name     string
        page     int
        limit    int
        source   string
        raw      string
        expected string
    }{
        {"page 0 limit 5", 0, 5, "sorted-node-metrics-asc.json", "sorted-node-metrics-asc.json"},
        {"page 1 limit 5", 1, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-1.json"},
        {"page 2 limit 5", 2, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-2.json"},
        {"page 3 limit 5", 3, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-3.json"},
        {"page faulty metrics", 1, 2, "faulty-node-metrics-sorted.json", "faulty-node-metrics-paged.json"},
        {
            page:     0,
            limit:    5,
            raw:      "sorted-node-metrics-asc.json",
            expected: "sorted-node-metrics-asc.json",
        },
        {
            page:     1,
            limit:    5,
            raw:      "sorted-node-metrics-asc.json",
            expected: "paged-node-metrics-1.json",
        },
        {
            page:     2,
            limit:    5,
            raw:      "sorted-node-metrics-asc.json",
            expected: "paged-node-metrics-2.json",
        },
        {
            page:     3,
            limit:    5,
            raw:      "sorted-node-metrics-asc.json",
            expected: "paged-node-metrics-3.json",
        },
        {
            page:     1,
            limit:    2,
            raw:      "faulty-node-metrics-sorted.json",
            expected: "faulty-node-metrics-paged.json",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            source, expected, err := jsonFromFile(tt.source, tt.expected)

    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            source, expected, err := jsonFromFile(tt.raw, tt.expected)
            if err != nil {
                t.Fatal(err)
            }

pkg/models/monitoring/testdata/null-node-metrics-sorted.json (new file, vendored, 53 lines)
@@ -0,0 +1,53 @@
{
  "results": [
    {
      "metric_name": "node_disk_size_utilisation",
      "data": {
        "resultType": "vector",
        "result": [
          {},
          {},
          {}
        ]
      }
    },
    {
      "metric_name": "node_memory_utilisation",
      "data": {
        "resultType": "vector",
        "result": [
          {
            "metric": {
              "node": "i-2dazc1d6"
            },
            "value": [
              1585658599.195,
              0.5286875837861773
            ]
          },
          {
            "metric": {
              "node": "i-hgcoippu"
            },
            "value": [
              1585658599.195,
              0.2497060264216553
            ]
          },
          {
            "metric": {
              "node": "i-ezjb7gsk"
            },
            "value": [
              1585658599.195,
              0.23637090535053928
            ]
          }
        ]
      }
    }
  ],
  "page": 1,
  "total_page": 1,
  "total_item": 3
}
pkg/models/monitoring/testdata/null-node-metrics.json (new file, vendored, 45 lines)
@@ -0,0 +1,45 @@
{
  "results": [
    {
      "metric_name": "node_disk_size_utilisation",
      "data": {
        "resultType": "vector"
      }
    },
    {
      "metric_name": "node_memory_utilisation",
      "data": {
        "resultType": "vector",
        "result": [
          {
            "metric": {
              "node": "i-2dazc1d6"
            },
            "value": [
              1585658599.195,
              0.5286875837861773
            ]
          },
          {
            "metric": {
              "node": "i-ezjb7gsk"
            },
            "value": [
              1585658599.195,
              0.23637090535053928
            ]
          },
          {
            "metric": {
              "node": "i-hgcoippu"
            },
            "value": [
              1585658599.195,
              0.2497060264216553
            ]
          }
        ]
      }
    }
  ]
}
@@ -138,13 +138,20 @@ func (p prometheus) GetMetadata(namespace string) []monitoring.Metadata {
        return meta
    }

    // Deduplication
    set := make(map[string]bool)
    for _, item := range items {
        meta = append(meta, monitoring.Metadata{
            Metric: item.Metric,
            Type:   string(item.Type),
            Help:   item.Help,
        })
        _, ok := set[item.Metric]
        if !ok {
            set[item.Metric] = true
            meta = append(meta, monitoring.Metadata{
                Metric: item.Metric,
                Type:   string(item.Type),
                Help:   item.Help,
            })
        }
    }

    return meta
}

@@ -186,7 +193,7 @@ func parseQueryResp(value model.Value) monitoring.MetricData {
            mv.Metadata[string(k)] = string(v)
        }

        mv.Sample = monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)}
        mv.Sample = &monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)}

        res.MetricValues = append(res.MetricValues, mv)
    }

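Switching Sample from a value to a *monitoring.Point presumably lets serialization distinguish a missing sample from a real zero-valued one: a value type always marshals, while a nil pointer can be omitted. A minimal illustration with hypothetical local types:

type point [2]float64

type metricValue struct {
    Sample *point `json:"sample,omitempty"` // nil: field omitted; &point{0, 0}: emitted as [0,0]
}
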
@@ -14,16 +14,21 @@ import (

func TestGetNamedMetrics(t *testing.T) {
    tests := []struct {
        name     string
        fakeResp string
        expected string
    }{
        {"prom returns good values", "metrics-vector-type-prom.json", "metrics-vector-type-res.json"},
        {"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
        {
            fakeResp: "metrics-vector-type-prom.json",
            expected: "metrics-vector-type-res.json",
        },
        {
            fakeResp: "metrics-error-prom.json",
            expected: "metrics-error-res.json",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            expected := make([]monitoring.Metric, 0)
            err := jsonFromFile(tt.expected, &expected)
            if err != nil {
@@ -44,16 +49,21 @@ func TestGetNamedMetrics(t *testing.T) {

func TestGetNamedMetricsOverTime(t *testing.T) {
    tests := []struct {
        name     string
        fakeResp string
        expected string
    }{
        {"prom returns good values", "metrics-matrix-type-prom.json", "metrics-matrix-type-res.json"},
        {"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
        {
            fakeResp: "metrics-matrix-type-prom.json",
            expected: "metrics-matrix-type-res.json",
        },
        {
            fakeResp: "metrics-error-prom.json",
            expected: "metrics-error-res.json",
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
    for i, tt := range tests {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            expected := make([]monitoring.Metric, 0)
            err := jsonFromFile(tt.expected, &expected)
            if err != nil {

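The template rewrites below track the kubelet/cAdvisor label rename (pod_name and container_name were dropped in favor of pod and container around Kubernetes 1.16), so a selector such as container_cpu_usage_seconds_total{pod_name!=""} becomes container_cpu_usage_seconds_total{pod!=""}, and the label_join shims that copied pod_name into a pod label are no longer needed.
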
@@ -114,8 +114,8 @@ var promQLTemplates = map[string]string{
|
||||
"workspace_cpu_usage": `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", $1}), 0.001)`,
|
||||
"workspace_memory_usage": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", $1})`,
|
||||
"workspace_memory_usage_wo_cache": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", $1})`,
|
||||
"workspace_net_bytes_transmitted": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"workspace_net_bytes_received": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"workspace_net_bytes_transmitted": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"workspace_net_bytes_received": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"workspace_pod_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
|
||||
"workspace_pod_running_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase="Running", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
|
||||
"workspace_pod_succeeded_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase="Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
|
||||
@@ -138,8 +138,8 @@ var promQLTemplates = map[string]string{
|
||||
"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", $1}, 0.001)`,
|
||||
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", $1}`,
|
||||
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", $1}`,
|
||||
"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_net_bytes_received": `sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_net_bytes_received": `sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_pod_count": `sum by (namespace) (kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_pod_running_count": `sum by (namespace) (kube_pod_status_phase{phase="Running", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
"namespace_pod_succeeded_count": `sum by (namespace) (kube_pod_status_phase{phase="Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
|
||||
@@ -181,16 +181,16 @@ var promQLTemplates = map[string]string{
	"workload_statefulset_unavailable_replicas_ratio": `namespace:statefulset_unavailable_replicas:ratio{$1}`,

	// pod
-	"pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}, 0.001)`,
-	"pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
-	"pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
-	"pod_net_bytes_transmitted": `label_join(sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
-	"pod_net_bytes_received": `label_join(sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
+	"pod_cpu_usage": `round(sum by (namespace, pod) (irate(container_cpu_usage_seconds_total{job="kubelet", pod!="", image!=""}[5m])) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}, 0.001)`,
+	"pod_memory_usage": `sum by (namespace, pod) (container_memory_usage_bytes{job="kubelet", pod!="", image!=""}) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
+	"pod_memory_usage_wo_cache": `sum by (namespace, pod) (container_memory_working_set_bytes{job="kubelet", pod!="", image!=""}) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
+	"pod_net_bytes_transmitted": `sum by (namespace, pod) (irate(container_network_transmit_bytes_total{pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
+	"pod_net_bytes_received": `sum by (namespace, pod) (irate(container_network_receive_bytes_total{pod!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,

	// container
-	"container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", $1}[5m])), 0.001)`,
-	"container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", $1})`,
-	"container_memory_usage_wo_cache": `sum by (namespace, pod_name, container_name) (container_memory_working_set_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", $1})`,
+	"container_cpu_usage": `round(sum by (namespace, pod, container) (irate(container_cpu_usage_seconds_total{job="kubelet", container!="POD", container!="", image!="", $1}[5m])), 0.001)`,
+	"container_memory_usage": `sum by (namespace, pod, container) (container_memory_usage_bytes{job="kubelet", container!="POD", container!="", image!="", $1})`,
+	"container_memory_usage_wo_cache": `sum by (namespace, pod, container) (container_memory_working_set_bytes{job="kubelet", container!="POD", container!="", image!="", $1})`,

	// pvc
	"pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
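Not part of the diff: a sketch of why the label_join wrappers can be dropped. Newer kubelet/cAdvisor releases expose `pod` and `container` labels directly (instead of `pod_name`/`container_name`), so the container metrics already share the `pod` label with kube_pod_owner and kube_pod_info and can be matched on (namespace, pod) without relabeling. The `metric{...}` placeholder below is illustrative, not a real series name.

package main

import "fmt"

func main() {
	// Before: copy pod_name into a synthetic "pod" label so the join works.
	before := `label_join(sum by (namespace, pod_name) (metric{pod_name!=""}), "pod", "", "pod_name")`
	// After: the "pod" label is native, so no label_join is needed.
	after := `sum by (namespace, pod) (metric{pod!=""})`
	fmt.Println(before)
	fmt.Println(after)
}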
@@ -255,25 +255,25 @@ var promQLTemplates = map[string]string{
	"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}

-func makeExpr(metric string, opt monitoring.QueryOptions) string {
+func makeExpr(metric string, opts monitoring.QueryOptions) string {
	tmpl := promQLTemplates[metric]
-	switch opt.Level {
+	switch opts.Level {
	case monitoring.LevelCluster:
		return tmpl
	case monitoring.LevelNode:
-		return makeNodeMetricExpr(tmpl, opt)
+		return makeNodeMetricExpr(tmpl, opts)
	case monitoring.LevelWorkspace:
-		return makeWorkspaceMetricExpr(tmpl, opt)
+		return makeWorkspaceMetricExpr(tmpl, opts)
	case monitoring.LevelNamespace:
-		return makeNamespaceMetricExpr(tmpl, opt)
+		return makeNamespaceMetricExpr(tmpl, opts)
	case monitoring.LevelWorkload:
-		return makeWorkloadMetricExpr(tmpl, opt)
+		return makeWorkloadMetricExpr(metric, tmpl, opts)
	case monitoring.LevelPod:
-		return makePodMetricExpr(tmpl, opt)
+		return makePodMetricExpr(tmpl, opts)
	case monitoring.LevelContainer:
-		return makeContainerMetricExpr(tmpl, opt)
+		return makeContainerMetricExpr(tmpl, opts)
	case monitoring.LevelPVC:
-		return makePVCMetricExpr(tmpl, opt)
+		return makePVCMetricExpr(tmpl, opts)
	case monitoring.LevelComponent:
		return tmpl
	default:
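For orientation, a hypothetical caller of makeExpr — a sketch only, assuming it lives in the same package with the monitoring package imported as above:

// Assumes same-package access to the unexported makeExpr.
func exampleMakeExpr() string {
	opts := monitoring.QueryOptions{
		Level:         monitoring.LevelNamespace,
		NamespaceName: "kube-system",
	}
	// Dispatches to makeNamespaceMetricExpr, which fills $1 in the
	// namespace_cpu_usage template with a namespace selector.
	return makeExpr("namespace_cpu_usage", opts)
}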
@@ -322,23 +322,31 @@ func makeNamespaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
	return strings.Replace(tmpl, "$1", namespaceSelector, -1)
}

-func makeWorkloadMetricExpr(tmpl string, o monitoring.QueryOptions) string {
+func makeWorkloadMetricExpr(metric, tmpl string, o monitoring.QueryOptions) string {
	var kindSelector, workloadSelector string

	switch o.WorkloadKind {
	case "deployment":
		o.WorkloadKind = Deployment
-		kindSelector = fmt.Sprintf(`namespace="%s", deployment!="", deployment=~"%s"`, o.NamespaceName, o.ResourceFilter)
	case "statefulset":
		o.WorkloadKind = StatefulSet
-		kindSelector = fmt.Sprintf(`namespace="%s", statefulset!="", statefulset=~"%s"`, o.NamespaceName, o.ResourceFilter)
	case "daemonset":
		o.WorkloadKind = DaemonSet
-		kindSelector = fmt.Sprintf(`namespace="%s", daemonset!="", daemonset=~"%s"`, o.NamespaceName, o.ResourceFilter)
	default:
		o.WorkloadKind = ".*"
		kindSelector = fmt.Sprintf(`namespace="%s"`, o.NamespaceName)
	}
	workloadSelector = fmt.Sprintf(`namespace="%s", workload=~"%s:%s"`, o.NamespaceName, o.WorkloadKind, o.ResourceFilter)

+	if strings.Contains(metric, "deployment") {
+		kindSelector = fmt.Sprintf(`namespace="%s", deployment!="", deployment=~"%s"`, o.NamespaceName, o.ResourceFilter)
+	}
+	if strings.Contains(metric, "statefulset") {
+		kindSelector = fmt.Sprintf(`namespace="%s", statefulset!="", statefulset=~"%s"`, o.NamespaceName, o.ResourceFilter)
+	}
+	if strings.Contains(metric, "daemonset") {
+		kindSelector = fmt.Sprintf(`namespace="%s", daemonset!="", daemonset=~"%s"`, o.NamespaceName, o.ResourceFilter)
+	}

	return strings.NewReplacer("$1", workloadSelector, "$2", kindSelector).Replace(tmpl)
}
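Illustrative only, with hypothetical inputs: the two selectors this function produces for a deployment-level metric. The Sprintf formats are the ones from the hunk above; $1 receives workloadSelector and $2 receives kindSelector via the NewReplacer call.

package main

import "fmt"

func main() {
	ns, filter, kind := "default", "apiserver|coredns", "Deployment"
	workloadSelector := fmt.Sprintf(`namespace="%s", workload=~"%s:%s"`, ns, kind, filter)
	kindSelector := fmt.Sprintf(`namespace="%s", deployment!="", deployment=~"%s"`, ns, filter)
	fmt.Println(workloadSelector) // fills $1
	fmt.Println(kindSelector)     // fills $2
}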
@@ -350,11 +358,11 @@ func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string {
	if o.WorkloadName != "" {
		switch o.WorkloadKind {
		case "deployment":
-			workloadSelector = fmt.Sprintf(`owner_kind="ReplicaSet", owner_name=~"^%s-[^-]{1,10}$"`, o.WorkloadKind)
+			workloadSelector = fmt.Sprintf(`owner_kind="ReplicaSet", owner_name=~"^%s-[^-]{1,10}$"`, o.WorkloadName)
		case "statefulset":
-			workloadSelector = fmt.Sprintf(`owner_kind="StatefulSet", owner_name="%s"`, o.WorkloadKind)
+			workloadSelector = fmt.Sprintf(`owner_kind="StatefulSet", owner_name="%s"`, o.WorkloadName)
		case "daemonset":
-			workloadSelector = fmt.Sprintf(`owner_kind="DaemonSet", owner_name="%s"`, o.WorkloadKind)
+			workloadSelector = fmt.Sprintf(`owner_kind="DaemonSet", owner_name="%s"`, o.WorkloadName)
		}
	}
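This hunk fixes a real bug: the old code interpolated o.WorkloadKind (the literal string "deployment") where the workload's name belonged, yielding selectors like owner_name=~"^deployment-[^-]{1,10}$" — visible in the old golden string in the testdata hunk below. The regex itself exists because pods of a Deployment are owned by an intermediate ReplicaSet named <deployment>-<hash>. A standalone sketch of why the pattern works:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A Deployment "elasticsearch" owns ReplicaSets such as
	// "elasticsearch-5f4b8c7d9"; those ReplicaSets own the pods.
	selector := fmt.Sprintf(`^%s-[^-]{1,10}$`, "elasticsearch")
	re := regexp.MustCompile(selector)
	fmt.Println(re.MatchString("elasticsearch-5f4b8c7d9")) // true: hash suffix
	fmt.Println(re.MatchString("elasticsearch-data-0"))    // false: extra dash
}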
@@ -385,9 +393,9 @@ func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string {
func makeContainerMetricExpr(tmpl string, o monitoring.QueryOptions) string {
	var containerSelector string
	if o.ContainerName != "" {
-		containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name="%s"`, o.PodName, o.NamespaceName, o.ContainerName)
+		containerSelector = fmt.Sprintf(`pod="%s", namespace="%s", container="%s"`, o.PodName, o.NamespaceName, o.ContainerName)
	} else {
-		containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name=~"%s"`, o.PodName, o.NamespaceName, o.ResourceFilter)
+		containerSelector = fmt.Sprintf(`pod="%s", namespace="%s", container=~"%s"`, o.PodName, o.NamespaceName, o.ResourceFilter)
	}
	return strings.Replace(tmpl, "$1", containerSelector, -1)
}
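A sketch of the two container selector shapes built above, after the pod_name/container_name to pod/container rename; the values (including the "syscall|istio-proxy" filter) are hypothetical:

package main

import "fmt"

func main() {
	pod, ns := "elasticsearch-12345", "default"
	exact := fmt.Sprintf(`pod="%s", namespace="%s", container="%s"`, pod, ns, "syscall")
	filtered := fmt.Sprintf(`pod="%s", namespace="%s", container=~"%s"`, pod, ns, "syscall|istio-proxy")
	fmt.Println(exact)    // used when ContainerName is set: exact match
	fmt.Println(filtered) // used otherwise: regex match on ResourceFilter
}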
@@ -10,33 +10,162 @@ import (
func TestMakeExpr(t *testing.T) {
	tests := []struct {
		name string
-		opt  monitoring.QueryOptions
+		opts monitoring.QueryOptions
	}{
-		{"cluster_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelCluster}},
-		{"node_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}},
-		{"node_cpu_total", monitoring.QueryOptions{Level: monitoring.LevelNode, ResourceFilter: "i-2dazc1d6|i-ezjb7gsk"}},
-		{"workspace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, WorkspaceName: "system-workspace"}},
-		{"workspace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, ResourceFilter: "system-workspace|demo"}},
-		{"namespace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, NamespaceName: "kube-system"}},
-		{"namespace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, ResourceFilter: "kube-system|default"}},
-		{"namespace_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelNamespace, WorkspaceName: "system-workspace", ResourceFilter: "kube-system|default"}},
-		{"workload_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: "deployment", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
-		{"workload_deployment_replica_available", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: ".*", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
-		{"pod_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", WorkloadKind: "deployment", WorkloadName: "elasticsearch", ResourceFilter: "elasticsearch-0"}},
-		{"pod_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", PodName: "elasticsearch-12345"}},
-		{"pod_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelPod, NodeName: "i-2dazc1d6", PodName: "elasticsearch-12345"}},
-		{"container_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ContainerName: "syscall"}},
-		{"container_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ResourceFilter: "syscall"}},
-		{"pvc_inodes_available", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", PersistentVolumeClaimName: "db-123"}},
-		{"pvc_inodes_used", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", ResourceFilter: "db-123"}},
-		{"pvc_inodes_total", monitoring.QueryOptions{Level: monitoring.LevelPVC, StorageClassName: "default", ResourceFilter: "db-123"}},
-		{"etcd_server_list", monitoring.QueryOptions{Level: monitoring.LevelComponent}},
+		{
+			name: "cluster_cpu_utilisation",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelCluster,
+			},
+		},
+		{
+			name: "node_cpu_utilisation",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelNode,
+				NodeName: "i-2dazc1d6",
+			},
+		},
+		{
+			name: "node_cpu_total",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelNode,
+				ResourceFilter: "i-2dazc1d6|i-ezjb7gsk",
+			},
+		},
+		{
+			name: "workspace_cpu_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelWorkspace,
+				WorkspaceName: "system-workspace",
+			},
+		},
+		{
+			name: "workspace_memory_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelWorkspace,
+				ResourceFilter: "system-workspace|demo",
+			},
+		},
+		{
+			name: "namespace_cpu_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelNamespace,
+				NamespaceName: "kube-system",
+			},
+		},
+		{
+			name: "namespace_memory_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelNamespace,
+				ResourceFilter: "kube-system|default",
+			},
+		},
+		{
+			name: "namespace_memory_usage_wo_cache",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelNamespace,
+				WorkspaceName: "system-workspace",
+				ResourceFilter: "kube-system|default",
+			},
+		},
+		{
+			name: "workload_cpu_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelWorkload,
+				WorkloadKind: "deployment",
+				NamespaceName: "default",
+				ResourceFilter: "apiserver|coredns",
+			},
+		},
+		{
+			name: "workload_deployment_replica_available",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelWorkload,
+				WorkloadKind: ".*",
+				NamespaceName: "default",
+				ResourceFilter: "apiserver|coredns",
+			},
+		},
+		{
+			name: "pod_cpu_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPod,
+				NamespaceName: "default",
+				WorkloadKind: "deployment",
+				WorkloadName: "elasticsearch",
+				ResourceFilter: "elasticsearch-0",
+			},
+		},
+		{
+			name: "pod_memory_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPod,
+				NamespaceName: "default",
+				PodName: "elasticsearch-12345",
+			},
+		},
+		{
+			name: "pod_memory_usage_wo_cache",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPod,
+				NodeName: "i-2dazc1d6",
+				PodName: "elasticsearch-12345",
+			},
+		},
+		{
+			name: "container_cpu_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelContainer,
+				NamespaceName: "default",
+				PodName: "elasticsearch-12345",
+				ContainerName: "syscall",
+			},
+		},
+		{
+			name: "container_memory_usage",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelContainer,
+				NamespaceName: "default",
+				PodName: "elasticsearch-12345",
+				ResourceFilter: "syscall",
+			},
+		},
+		{
+			name: "pvc_inodes_available",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPVC,
+				NamespaceName: "default",
+				PersistentVolumeClaimName: "db-123",
+			},
+		},
+		{
+			name: "pvc_inodes_used",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPVC,
+				NamespaceName: "default",
+				ResourceFilter: "db-123",
+			},
+		},
+		{
+			name: "pvc_inodes_total",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelPVC,
+				StorageClassName: "default",
+				ResourceFilter: "db-123",
+			},
+		},
+		{
+			name: "etcd_server_list",
+			opts: monitoring.QueryOptions{
+				Level: monitoring.LevelComponent,
+			},
+		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			expected := testdata.PromQLs[tt.name]
-			result := makeExpr(tt.name, tt.opt)
+			result := makeExpr(tt.name, tt.opts)
			if diff := cmp.Diff(result, expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", expected, diff)
			}
@@ -11,6 +11,16 @@
	"help": "The total number of ZooKeeper failures.",
	"unit": ""
},
+{
+	"target": {
+		"instance": "127.0.0.1:9090",
+		"job": "prometheus"
+	},
+	"metric": "prometheus_tsdb_reloads_total",
+	"type": "counter",
+	"help": "Number of times the database reloaded block data from disk.",
+	"unit": ""
+},
{
	"target": {
		"instance": "127.0.0.1:9090",
@@ -10,12 +10,12 @@ var PromQLs = map[string]string{
	"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"kube-system|default"}`,
	"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", label_kubesphere_io_workspace="system-workspace", namespace=~"kube-system|default"}`,
	"workload_cpu_usage": `round(namespace:workload_cpu_usage:sum{namespace="default", workload=~"Deployment:apiserver|coredns"}, 0.001)`,
-	"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
-	"pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^deployment-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`,
-	"pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`,
-	"pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`,
-	"container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name="syscall"}[5m])), 0.001)`,
-	"container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name=~"syscall"})`,
+	"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default", deployment!="", deployment=~"apiserver|coredns"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
+	"pod_cpu_usage": `round(sum by (namespace, pod) (irate(container_cpu_usage_seconds_total{job="kubelet", pod!="", image!=""}[5m])) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^elasticsearch-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`,
+	"pod_memory_usage": `sum by (namespace, pod) (container_memory_usage_bytes{job="kubelet", pod!="", image!=""}) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`,
+	"pod_memory_usage_wo_cache": `sum by (namespace, pod) (container_memory_working_set_bytes{job="kubelet", pod!="", image!=""}) * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`,
+	"container_cpu_usage": `round(sum by (namespace, pod, container) (irate(container_cpu_usage_seconds_total{job="kubelet", container!="POD", container!="", image!="", pod="elasticsearch-12345", namespace="default", container="syscall"}[5m])), 0.001)`,
+	"container_memory_usage": `sum by (namespace, pod, container) (container_memory_usage_bytes{job="kubelet", container!="POD", container!="", image!="", pod="elasticsearch-12345", namespace="default", container=~"syscall"})`,
	"pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim="db-123"}`,
	"pvc_inodes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim=~"db-123"}`,
	"pvc_inodes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{storageclass="default", persistentvolumeclaim=~"db-123"}`,
@@ -103,9 +103,11 @@ type PodOption struct {
func (po PodOption) Apply(o *QueryOptions) {
	o.Level = LevelPod
	o.ResourceFilter = po.ResourceFilter
	o.NodeName = po.NodeName
	o.NamespaceName = po.NamespaceName
	o.WorkloadKind = po.WorkloadKind
	o.WorkloadName = po.WorkloadName
	o.PodName = po.PodName
}

type ContainerOption struct {
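A sketch of how a PodOption flows into QueryOptions — a hypothetical caller in the same package, using only the fields copied in Apply above:

func examplePodOption() QueryOptions {
	var qo QueryOptions
	PodOption{
		NamespaceName:  "default",
		WorkloadKind:   "deployment",
		WorkloadName:   "elasticsearch",
		ResourceFilter: "elasticsearch-0",
	}.Apply(&qo)
	return qo // Level is now LevelPod; ready for expression rendering
}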
@@ -26,8 +26,11 @@ type Point [2]float64

type MetricValue struct {
	Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
-	Sample Point `json:"value,omitempty" description:"time series, values of vector type"`
-	Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
+	// The type of Point is a float64 array with fixed length of 2.
+	// So Point will always be initialized as [0, 0], rather than nil.
+	// To allow empty Sample, we should declare Sample to type *Point
+	Sample *Point  `json:"value,omitempty" description:"time series, values of vector type"`
+	Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
}

func (p Point) Timestamp() float64 {
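A standalone sketch (not from this commit) of the rationale in the comment above: json's omitempty never drops a fixed-size array, because its length is always 2, but it does drop a nil pointer, so *Point allows an absent sample.

package main

import (
	"encoding/json"
	"fmt"
)

type point [2]float64

func main() {
	type byValue struct {
		Sample point `json:"value,omitempty"`
	}
	type byPointer struct {
		Sample *point `json:"value,omitempty"`
	}
	a, _ := json.Marshal(byValue{})   // zero array is still marshaled
	b, _ := json.Marshal(byPointer{}) // nil pointer is omitted
	fmt.Println(string(a)) // {"value":[0,0]}
	fmt.Println(string(b)) // {}
}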
pkg/simple/client/multicluster/options.go (new file, 26 lines)
@@ -0,0 +1,26 @@
+package multicluster
+
+import "github.com/spf13/pflag"
+
+type Options struct {
+	// Enable indicates whether the multiple-cluster mode is enabled.
+	Enable           bool `json:"enable"`
+	EnableFederation bool `json:"enableFederation,omitempty"`
+}
+
+// NewOptions returns a default Options with multiple-cluster mode disabled.
+func NewOptions() *Options {
+	return &Options{
+		Enable:           false,
+		EnableFederation: false,
+	}
+}
+
+func (o *Options) Validate() []error {
+	return nil
+}
+
+func (o *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
+	fs.BoolVar(&o.Enable, "multiple-clusters", s.Enable, ""+
+		"This field instructs KubeSphere to enter multiple-cluster mode or not.")
+}
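A sketch of wiring the new options into a flag set; the import path is assumed from the repository layout, and the flag name comes from AddFlags above:

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"kubesphere.io/kubesphere/pkg/simple/client/multicluster" // assumed path
)

func main() {
	defaults := multicluster.NewOptions()
	opts := multicluster.NewOptions()
	fs := pflag.NewFlagSet("example", pflag.ExitOnError)
	opts.AddFlags(fs, defaults)
	_ = fs.Parse([]string{"--multiple-clusters=true"})
	fmt.Println(opts.Enable) // true
}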