diff --git a/api/ks-openapi-spec/swagger.json b/api/ks-openapi-spec/swagger.json index a4044572c..78f7d8688 100644 --- a/api/ks-openapi-spec/swagger.json +++ b/api/ks-openapi-spec/swagger.json @@ -17202,10 +17202,10 @@ }, "v1alpha2.Node": { "required": [ - "rank", + "labelMinor", "id", "label", - "labelMinor", + "rank", "controls" ], "properties": { @@ -17313,10 +17313,10 @@ }, "v1alpha2.NodeSummary": { "required": [ - "labelMinor", - "rank", "id", - "label" + "label", + "labelMinor", + "rank" ], "properties": { "adjacency": { diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index bed04da9d..7adfecd32 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -88,11 +88,13 @@ func AddControllers( client.KubeSphere(), kubesphereInformer.Devops().V1alpha1().S2iBinaries(), kubesphereInformer.Devops().V1alpha1().S2iRuns()) + devopsProjectController := devopsproject.NewController(client.Kubernetes(), client.KubeSphere(), devopsClient, informerFactory.KubernetesSharedInformerFactory().Core().V1().Namespaces(), informerFactory.KubeSphereSharedInformerFactory().Devops().V1alpha3().DevOpsProjects(), ) + devopsPipelineController := pipeline.NewController(client.Kubernetes(), client.KubeSphere(), devopsClient, @@ -120,9 +122,8 @@ func AddControllers( clusterController := cluster.NewClusterController( client.Kubernetes(), + client.Config(), kubesphereInformer.Cluster().V1alpha1().Clusters(), - kubesphereInformer.Cluster().V1alpha1().Agents(), - client.KubeSphere().ClusterV1alpha1().Agents(), client.KubeSphere().ClusterV1alpha1().Clusters()) controllers := map[string]manager.Runnable{ diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index 272bc4008..143aa72c5 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -9,6 +9,7 @@ import ( kubesphereconfig 
"kubesphere.io/kubesphere/pkg/apiserver/config" "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" "kubesphere.io/kubesphere/pkg/simple/client/k8s" + "kubesphere.io/kubesphere/pkg/simple/client/multicluster" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" "kubesphere.io/kubesphere/pkg/simple/client/s3" "strings" @@ -16,24 +17,28 @@ import ( ) type KubeSphereControllerManagerOptions struct { - KubernetesOptions *k8s.KubernetesOptions - DevopsOptions *jenkins.Options - S3Options *s3.Options - OpenPitrixOptions *openpitrix.Options - LeaderElection *leaderelection.LeaderElectionConfig + KubernetesOptions *k8s.KubernetesOptions + DevopsOptions *jenkins.Options + S3Options *s3.Options + OpenPitrixOptions *openpitrix.Options + MultiClusterOptions *multicluster.Options + LeaderElect bool + LeaderElection *leaderelection.LeaderElectionConfig } func NewKubeSphereControllerManagerOptions() *KubeSphereControllerManagerOptions { s := &KubeSphereControllerManagerOptions{ - KubernetesOptions: k8s.NewKubernetesOptions(), - DevopsOptions: jenkins.NewDevopsOptions(), - S3Options: s3.NewS3Options(), - OpenPitrixOptions: openpitrix.NewOptions(), + KubernetesOptions: k8s.NewKubernetesOptions(), + DevopsOptions: jenkins.NewDevopsOptions(), + S3Options: s3.NewS3Options(), + OpenPitrixOptions: openpitrix.NewOptions(), + MultiClusterOptions: multicluster.NewOptions(), LeaderElection: &leaderelection.LeaderElectionConfig{ LeaseDuration: 30 * time.Second, RenewDeadline: 15 * time.Second, RetryPeriod: 5 * time.Second, }, + LeaderElect: false, } return s @@ -53,10 +58,15 @@ func (s *KubeSphereControllerManagerOptions) Flags() cliflag.NamedFlagSets { s.DevopsOptions.AddFlags(fss.FlagSet("devops"), s.DevopsOptions) s.S3Options.AddFlags(fss.FlagSet("s3"), s.S3Options) s.OpenPitrixOptions.AddFlags(fss.FlagSet("openpitrix"), s.OpenPitrixOptions) + s.MultiClusterOptions.AddFlags(fss.FlagSet("multicluster"), s.MultiClusterOptions) fs := fss.FlagSet("leaderelection") 
s.bindLeaderElectionFlags(s.LeaderElection, fs) + fs.BoolVar(&s.LeaderElect, "leader-elect", s.LeaderElect, ""+ + "Whether to enable leader election. This field should be enabled when controller manager"+ + "deployed with multiple replicas.") + kfs := fss.FlagSet("klog") local := flag.NewFlagSet("klog", flag.ExitOnError) klog.InitFlags(local) diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index 69aa0789a..5a1726585 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -37,16 +37,17 @@ import ( "kubesphere.io/kubesphere/pkg/controller/namespace" "kubesphere.io/kubesphere/pkg/controller/user" "kubesphere.io/kubesphere/pkg/controller/workspace" + "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" "kubesphere.io/kubesphere/pkg/simple/client/k8s" - "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" "kubesphere.io/kubesphere/pkg/simple/client/s3" "kubesphere.io/kubesphere/pkg/utils/term" "os" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - "sigs.k8s.io/controller-runtime/pkg/webhook" ) func NewControllerManagerCommand() *cobra.Command { @@ -60,6 +61,7 @@ func NewControllerManagerCommand() *cobra.Command { S3Options: conf.S3Options, OpenPitrixOptions: conf.OpenPitrixOptions, LeaderElection: s.LeaderElection, + LeaderElect: s.LeaderElect, } } @@ -175,6 +177,11 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) cancel() }() + if !s.LeaderElect { + run(ctx) + return nil + } + id, err := os.Hostname() if err != nil { return err diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go index 855558ad3..587622047 100644 --- a/cmd/ks-apiserver/app/options/options.go +++ b/cmd/ks-apiserver/app/options/options.go @@ -17,6 
+17,7 @@ import ( "kubesphere.io/kubesphere/pkg/simple/client/ldap" esclient "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch" "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" + "kubesphere.io/kubesphere/pkg/simple/client/multicluster" "kubesphere.io/kubesphere/pkg/simple/client/network" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" "kubesphere.io/kubesphere/pkg/simple/client/s3" @@ -52,6 +53,7 @@ func NewServerRunOptions() *ServerRunOptions { LdapOptions: ldap.NewOptions(), RedisOptions: cache.NewRedisOptions(), AuthenticationOptions: authoptions.NewAuthenticateOptions(), + MultiClusterOptions: multicluster.NewOptions(), }, } @@ -74,6 +76,7 @@ func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) { s.ServiceMeshOptions.AddFlags(fss.FlagSet("servicemesh"), s.ServiceMeshOptions) s.MonitoringOptions.AddFlags(fss.FlagSet("monitoring"), s.MonitoringOptions) s.LoggingOptions.AddFlags(fss.FlagSet("logging"), s.LoggingOptions) + s.MultiClusterOptions.AddFlags(fss.FlagSet("multicluster"), s.MultiClusterOptions) fs = fss.FlagSet("klog") local := flag.NewFlagSet("klog", flag.ExitOnError) diff --git a/config/crd/bases/cluster.kubesphere.io_clusters.yaml b/config/crd/bases/cluster.kubesphere.io_clusters.yaml index 1d4489fa0..b1c718e24 100644 --- a/config/crd/bases/cluster.kubesphere.io_clusters.yaml +++ b/config/crd/bases/cluster.kubesphere.io_clusters.yaml @@ -9,13 +9,13 @@ metadata: name: clusters.cluster.kubesphere.io spec: additionalPrinterColumns: - - JSONPath: .spec.federated + - JSONPath: .spec.joinFederation name: Federated type: boolean - JSONPath: .spec.provider name: Provider type: string - - JSONPath: .spec.active + - JSONPath: .spec.enable name: Active type: boolean - JSONPath: .status.kubernetesVersion @@ -47,10 +47,54 @@ spec: type: object spec: properties: - active: + connection: + description: Connection holds info to connect to the member cluster + properties: + kubeconfig: + description: KubeConfig 
content used to connect to cluster api server + Should provide this field explicitly if connection type is direct. + Will be populated by ks-proxy if connection type is proxy. + format: byte + type: string + kubernetesAPIEndpoint: + description: Kubernetes API Server endpoint. This can be a hostname, + hostname:port, IP or IP:port. Should provide this field explicitly + if connection type is direct. Will be populated by ks-apiserver + if connection type is proxy. + type: string + kubernetesAPIServerPort: + description: KubeAPIServerPort is the port which listens for forwarding + kube-apiserver traffic Only applicable when connection type is + proxy. + type: integer + kubesphereAPIEndpoint: + description: KubeSphere API Server endpoint. This can be a hostname, + hostname:port, IP or IP:port. Should provide this field explicitly + if connection type is direct. Will be populated by ks-apiserver + if connection type is proxy. + type: string + kubesphereAPIServerPort: + description: KubeSphereAPIServerPort is the port which listens for + forwarding kubesphere apigateway traffic Only applicable when + connection type is proxy. + type: integer + token: + description: Token used by agents of member cluster to connect to + host cluster proxy. This field is populated by apiserver only + if connection type is proxy. 
+ type: string + type: + description: type defines how host cluster will connect to host + cluster ConnectionTypeDirect means direct connection, this requires kubeconfig + and kubesphere apiserver endpoint provided ConnectionTypeProxy + means using kubesphere proxy, no kubeconfig or kubesphere apiserver + endpoint required + type: string + type: object + enable: description: Desired state of the cluster type: boolean - federated: + joinFederation: description: Join cluster as a kubefed cluster type: boolean provider: @@ -92,12 +136,23 @@ spec: type: object type: array kubernetesVersion: - description: GitVersion of the kubernetes cluster, this field is set + description: GitVersion of the kubernetes cluster, this field is populated by cluster controller type: string nodeCount: - description: Count of the kubernetes cluster nodes + description: Count of the kubernetes cluster nodes This field may not + reflect the instant status of the cluster. type: integer + region: + description: Region is the name of the region in which all of the nodes + in the cluster exist. e.g. 'us-east1'. + type: string + zones: + description: Zones are the names of availability zones in which the + nodes of the cluster exist, e.g. 'us-east1-a'. 
+ items: + type: string + type: array type: object type: object version: v1alpha1 diff --git a/config/crd/bases/devops.kubesphere.io_devopsprojects.yaml b/config/crd/bases/devops.kubesphere.io_devopsprojects.yaml new file mode 100644 index 000000000..e0e6e0cd2 --- /dev/null +++ b/config/crd/bases/devops.kubesphere.io_devopsprojects.yaml @@ -0,0 +1,59 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: devopsprojects.devops.kubesphere.io +spec: + group: devops.kubesphere.io + names: + categories: + - devops + kind: DevOpsProject + listKind: DevOpsProjectList + plural: devopsprojects + singular: devopsproject + scope: Cluster + validation: + openAPIV3Schema: + description: DevOpsProject is the Schema for the devopsprojects API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DevOpsProjectSpec defines the desired state of DevOpsProject + type: object + status: + description: DevOpsProjectStatus defines the observed state of DevOpsProject + properties: + adminNamespace: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/devops.kubesphere.io_pipelines.yaml b/config/crd/bases/devops.kubesphere.io_pipelines.yaml new file mode 100644 index 000000000..1d3df2fa6 --- /dev/null +++ b/config/crd/bases/devops.kubesphere.io_pipelines.yaml @@ -0,0 +1,260 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: pipelines.devops.kubesphere.io +spec: + group: devops.kubesphere.io + names: + kind: Pipeline + listKind: PipelineList + plural: pipelines + singular: pipeline + scope: Namespaced + validation: + openAPIV3Schema: + description: Pipeline is the Schema for the pipelines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PipelineSpec defines the desired state of Pipeline + properties: + multi_branch_pipeline: + properties: + bitbucket_server_source: + properties: + api_uri: + type: string + credential_id: + type: string + discover_branches: + type: integer + discover_pr_from_forks: + properties: + strategy: + type: integer + trust: + type: integer + type: object + discover_pr_from_origin: + type: integer + git_clone_option: + properties: + depth: + type: integer + shallow: + type: boolean + timeout: + type: integer + type: object + owner: + type: string + regex_filter: + type: string + repo: + type: string + scm_id: + type: string + type: object + descriptio: + type: string + discarder: + properties: + days_to_keep: + type: string + num_to_keep: + type: string + type: object + git_source: + properties: + credential_id: + type: string + discover_branches: + type: boolean + git_clone_option: + properties: + depth: + type: integer + shallow: + type: boolean + timeout: + type: integer + type: object + regex_filter: + type: string + scm_id: + type: string + url: + type: string + type: object + github_source: + properties: + api_uri: + type: string + credential_id: + type: string + discover_branches: + type: integer + discover_pr_from_forks: + properties: + strategy: + type: integer + trust: + type: integer + type: object + discover_pr_from_origin: + type: integer + git_clone_option: + properties: + depth: + type: integer + shallow: + type: boolean + timeout: + type: integer + type: object + owner: + type: string + regex_filter: + type: string + repo: + type: string + scm_id: + type: string + type: object + multibranch_job_trigger: + properties: + create_action_job_to_trigger: + type: string + delete_action_job_to_trigger: + type: string + type: object + name: + type: string + 
script_path: + type: string + single_svn_source: + properties: + credential_id: + type: string + remote: + type: string + scm_id: + type: string + type: object + source_type: + type: string + svn_source: + properties: + credential_id: + type: string + excludes: + type: string + includes: + type: string + remote: + type: string + scm_id: + type: string + type: object + timer_trigger: + properties: + cron: + description: user in no scm job + type: string + interval: + description: use in multi-branch job + type: string + type: object + required: + - name + - script_path + - source_type + type: object + pipeline: + properties: + descriptio: + type: string + disable_concurrent: + type: boolean + discarder: + properties: + days_to_keep: + type: string + num_to_keep: + type: string + type: object + jenkinsfile: + type: string + name: + type: string + parameters: + items: + properties: + default_value: + type: string + description: + type: string + name: + type: string + type: + type: string + required: + - name + - type + type: object + type: array + remote_trigger: + properties: + token: + type: string + type: object + timer_trigger: + properties: + cron: + description: user in no scm job + type: string + interval: + description: use in multi-branch job + type: string + type: object + required: + - name + type: object + type: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + required: + - type + type: object + status: + description: PipelineStatus defines the observed state of Pipeline + type: object + type: object + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/iam.kubesphere.io_policyrules.yaml b/config/crd/bases/iam.kubesphere.io_policyrules.yaml new file mode 100644 index 000000000..c0eb6ed9c --- /dev/null 
+++ b/config/crd/bases/iam.kubesphere.io_policyrules.yaml @@ -0,0 +1,58 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: policyrules.iam.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .scope + name: Scope + type: string + group: iam.kubesphere.io + names: + categories: + - iam + kind: PolicyRule + listKind: PolicyRuleList + plural: policyrules + singular: policyrule + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + rego: + type: string + scope: + type: string + required: + - rego + - scope + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/iam.kubesphere.io_rolebindings.yaml b/config/crd/bases/iam.kubesphere.io_rolebindings.yaml new file mode 100644 index 000000000..ede871b22 --- /dev/null +++ b/config/crd/bases/iam.kubesphere.io_rolebindings.yaml @@ -0,0 +1,104 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: rolebindings.iam.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .scope + name: Scope + type: string + - JSONPath: .roleRef.name + name: RoleRef + type: string + - JSONPath: .subjects[*].name + name: Subjects + type: string + group: iam.kubesphere.io + names: + categories: + - iam + kind: RoleBinding + listKind: RoleBindingList + plural: rolebindings + singular: rolebinding + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: RoleBinding is the Schema for the rolebindings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + roleRef: + description: RoleRef contains information that points to the role being + used + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - apiGroup + - kind + - name + type: object + scope: + type: string + subjects: + description: Subjects holds references to the users the role applies to. + items: + description: or a value for non-objects such as user and group names. + properties: + apiGroup: + description: APIGroup holds the API group of the referenced subject. + type: string + kind: + description: Kind of object being referenced. Values defined by this + API group are "User", "Group", and "ServiceAccount". If the Authorizer + does not recognized the kind value, the Authorizer should report + an error. + type: string + name: + description: Name of the object being referenced. 
+ type: string + required: + - apiGroup + - kind + - name + type: object + type: array + required: + - roleRef + - scope + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/iam.kubesphere.io_roles.yaml b/config/crd/bases/iam.kubesphere.io_roles.yaml new file mode 100644 index 000000000..91b01e128 --- /dev/null +++ b/config/crd/bases/iam.kubesphere.io_roles.yaml @@ -0,0 +1,87 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: roles.iam.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .target.scope + name: Scope + type: string + - JSONPath: .target.name + name: Target + type: string + group: iam.kubesphere.io + names: + categories: + - iam + kind: Role + listKind: RoleList + plural: roles + singular: role + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + rules: + items: + description: RuleRef contains information that points to the role being + used + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - apiGroup + - kind + - name + type: object + type: array + target: + properties: + name: + type: string + scope: + type: string + required: + - name + - scope + type: object + required: + - rules + - target + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/go.mod b/go.mod index 27d2cccda..3f05a0dec 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,6 @@ require ( github.com/go-ldap/ldap v3.0.3+incompatible github.com/go-logr/logr v0.1.0 github.com/go-logr/zapr v0.1.1 // indirect - github.com/go-openapi/jsonreference v0.19.3 // indirect github.com/go-openapi/loads v0.19.2 github.com/go-openapi/spec v0.19.3 github.com/go-openapi/strfmt v0.19.0 @@ -52,26 +51,25 @@ require ( github.com/gorilla/websocket v1.4.0 github.com/hashicorp/go-version v1.2.0 // indirect github.com/imdario/mergo v0.3.7 // indirect - github.com/json-iterator/go v1.1.8 + github.com/json-iterator/go v1.1.9 github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/kiali/kiali v0.15.1-0.20191210080139-edbbad1ef779 github.com/kubernetes-sigs/application v0.0.0-20191210100950-18cc93526ab4 github.com/kubesphere/sonargo v0.0.2 github.com/leodido/go-urn v1.1.0 // indirect github.com/lib/pq v1.2.0 // indirect - github.com/mailru/easyjson v0.7.0 // indirect github.com/mattn/go-sqlite3 v1.11.0 // indirect 
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect - github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega v1.5.0 + github.com/onsi/ginkgo v1.12.0 + github.com/onsi/gomega v1.9.0 github.com/open-policy-agent/opa v0.18.0 github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.1 // indirect github.com/openshift/api v0.0.0-20180801171038-322a19404e37 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce - github.com/prometheus/client_golang v0.9.4 + github.com/prometheus/client_golang v1.0.0 github.com/prometheus/common v0.4.1 github.com/prometheus/prometheus v1.8.2 github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 @@ -89,25 +87,25 @@ require ( gopkg.in/go-playground/validator.v9 v9.29.1 // indirect gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect gopkg.in/src-d/go-git.v4 v4.11.0 - gopkg.in/yaml.v2 v2.2.4 + gopkg.in/yaml.v2 v2.2.8 istio.io/api v0.0.0-20191111210003-35e06ef8d838 istio.io/client-go v0.0.0-20191113122552-9bd0ba57c3d2 - k8s.io/api v0.0.0-20191114100352-16d7abae0d2a - k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 - k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb - k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 - k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 - k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 - k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 + k8s.io/api v0.17.3 + k8s.io/apiextensions-apiserver v0.17.3 + k8s.io/apimachinery v0.17.3 + k8s.io/apiserver v0.17.3 + k8s.io/client-go v0.17.3 + k8s.io/code-generator v0.17.3 + k8s.io/component-base v0.17.3 k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e // indirect k8s.io/klog v1.0.0 k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a - k8s.io/utils v0.0.0-20191114184206-e782cd3c129f // indirect kubesphere.io/im v0.1.0 // indirect 
openpitrix.io/iam v0.1.0 // indirect openpitrix.io/openpitrix v0.4.1-0.20190920134345-4d2be6e4965c - sigs.k8s.io/controller-runtime v0.4.0 + sigs.k8s.io/controller-runtime v0.5.0 sigs.k8s.io/controller-tools v0.2.4 + sigs.k8s.io/kubefed v0.2.0-alpha.1 ) replace ( @@ -121,6 +119,7 @@ replace ( github.com/Azure/go-autorest/logger => github.com/Azure/go-autorest/logger v0.1.0 github.com/Azure/go-autorest/tracing => github.com/Azure/go-autorest/tracing v0.5.0 github.com/BurntSushi/toml => github.com/BurntSushi/toml v0.3.1 + github.com/MakeNowJust/heredoc => github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd github.com/Masterminds/semver => github.com/Masterminds/semver v1.5.0 github.com/Microsoft/go-winio => github.com/Microsoft/go-winio v0.4.12 github.com/NYTimes/gziphandler => github.com/NYTimes/gziphandler v1.1.1 @@ -144,6 +143,7 @@ replace ( github.com/bmizerany/assert => github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/cenkalti/backoff => github.com/cenkalti/backoff v2.2.1+incompatible github.com/cespare/xxhash => github.com/cespare/xxhash v1.1.0 + github.com/chai2010/gettext-go => github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 github.com/cheekybits/genny => github.com/cheekybits/genny v1.0.0 github.com/client9/misspell => github.com/client9/misspell v0.3.4 github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.3 @@ -154,6 +154,7 @@ replace ( github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/cpuguy83/go-md2man => github.com/cpuguy83/go-md2man v1.0.10 github.com/davecgh/go-spew => github.com/davecgh/go-spew v1.1.1 + github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd github.com/deckarep/golang-set => github.com/deckarep/golang-set v1.7.1 github.com/denisenkom/go-mssqldb => github.com/denisenkom/go-mssqldb v0.0.0-20190204142019-df6d76eb9289 github.com/dgrijalva/jwt-go => github.com/dgrijalva/jwt-go 
v3.2.0+incompatible @@ -175,6 +176,7 @@ replace ( github.com/emirpasic/gods => github.com/emirpasic/gods v1.12.0 github.com/erikstmartin/go-testdb => github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 github.com/evanphx/json-patch => github.com/evanphx/json-patch v4.5.0+incompatible + github.com/exponent-io/jsonpath => github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d github.com/fatih/camelcase => github.com/fatih/camelcase v1.0.0 github.com/fatih/color => github.com/fatih/color v1.7.0 github.com/fatih/structs => github.com/fatih/structs v1.1.0 @@ -216,6 +218,9 @@ replace ( github.com/golang/mock => github.com/golang/mock v1.2.0 github.com/golang/protobuf => github.com/golang/protobuf v1.3.2 github.com/golang/snappy => github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db + github.com/golangplus/bytes => github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450 + github.com/golangplus/fmt => github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995 + github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e github.com/google/btree => github.com/google/btree v1.0.0 github.com/google/go-cmp => github.com/google/go-cmp v0.3.0 github.com/google/go-querystring => github.com/google/go-querystring v1.0.0 @@ -266,11 +271,14 @@ replace ( github.com/kr/pty => github.com/kr/pty v1.1.5 github.com/kr/text => github.com/kr/text v0.1.0 github.com/kubernetes-sigs/application => github.com/kubesphere/application v0.0.0-20191210100950-18cc93526ab4 + github.com/kubernetes-sigs/federation-v2 => github.com/kubernetes-sigs/federation-v2 v0.0.10 github.com/kubesphere/s2ioperator => github.com/kubesphere/s2ioperator v0.0.14 github.com/kubesphere/sonargo => github.com/kubesphere/sonargo v0.0.2 github.com/kylelemons/godebug => github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 github.com/leodido/go-urn => github.com/leodido/go-urn v1.1.0 github.com/lib/pq => github.com/lib/pq v1.2.0 + 
github.com/liggitt/tabwriter => github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de + github.com/lithammer/dedent => github.com/lithammer/dedent v1.1.0 github.com/lucas-clemente/quic-go => github.com/lucas-clemente/quic-go v0.11.1 github.com/magiconair/properties => github.com/magiconair/properties v1.8.0 github.com/mailru/easyjson => github.com/mailru/easyjson v0.7.0 @@ -284,6 +292,7 @@ replace ( github.com/mholt/certmagic => github.com/mholt/certmagic v0.5.1 github.com/miekg/dns => github.com/miekg/dns v1.1.9 github.com/mitchellh/go-homedir => github.com/mitchellh/go-homedir v1.1.0 + github.com/mitchellh/go-wordwrap => github.com/mitchellh/go-wordwrap v1.0.0 github.com/mitchellh/mapstructure => github.com/mitchellh/mapstructure v1.1.2 github.com/mna/pigeon => github.com/mna/pigeon v0.0.0-20180808201053-bb0192cfc2ae github.com/modern-go/concurrent => github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -303,6 +312,7 @@ replace ( github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1 github.com/openshift/api => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 github.com/openshift/build-machinery-go => github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160 + github.com/openshift/generic-admission-server => github.com/openshift/generic-admission-server v1.14.0 github.com/opentracing/opentracing-go => github.com/opentracing/opentracing-go v1.1.0 github.com/pborman/uuid => github.com/pborman/uuid v1.2.0 github.com/pelletier/go-buffruneio => github.com/pelletier/go-buffruneio v0.2.0 @@ -354,6 +364,7 @@ replace ( github.com/urfave/cli => github.com/urfave/cli v1.20.0 github.com/xanzy/ssh-agent => github.com/xanzy/ssh-agent v0.2.1 github.com/xiang90/probing => github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 + github.com/xlab/handysort => github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1 github.com/xlab/treeprint => github.com/xlab/treeprint 
v0.0.0-20180616005107-d6fb6747feb6 github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 github.com/yashtewari/glob-intersection => github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b @@ -411,12 +422,16 @@ replace ( k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb k8s.io/apiserver => k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.3 k8s.io/client-go => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 + k8s.io/cluster-registry => k8s.io/cluster-registry v0.0.6 k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 k8s.io/component-base => k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 k8s.io/gengo => k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e k8s.io/klog => k8s.io/klog v1.0.0 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a + k8s.io/kubectl => k8s.io/kubectl v0.17.3 + k8s.io/metrics => k8s.io/metrics v0.17.3 k8s.io/utils => k8s.io/utils v0.0.0-20191114184206-e782cd3c129f kubesphere.io/application => kubesphere.io/application v0.0.0-20190404151855-67ae7f915d4e kubesphere.io/im => kubesphere.io/im v0.1.0 @@ -431,7 +446,10 @@ replace ( rsc.io/goversion => rsc.io/goversion v1.0.0 sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.4.0 sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.2.4 + sigs.k8s.io/kubefed => sigs.k8s.io/kubefed v0.2.0-alpha.1 + sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca sigs.k8s.io/testing_frameworks => sigs.k8s.io/testing_frameworks v0.1.2 sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.1.0 + vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc ) diff --git 
a/go.sum b/go.sum index 3e9a3bfd0..e1063bda5 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,7 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= @@ -60,6 +61,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -75,6 +77,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext 
v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/denisenkom/go-mssqldb v0.0.0-20190204142019-df6d76eb9289/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= @@ -110,6 +113,7 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= @@ -126,11 +130,9 @@ github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk= github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.4.0 
h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -165,7 +167,6 @@ github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDA github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= @@ -188,6 +189,9 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.3.0 
h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -210,7 +214,6 @@ github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -256,7 +259,6 @@ github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 h1:SWlt7BoQNASb github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -273,6 +275,8 @@ github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= @@ -288,6 +292,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mna/pigeon v0.0.0-20180808201053-bb0192cfc2ae/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04= @@ -315,6 +320,7 @@ github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVo github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc= github.com/openshift/api v0.0.0-20180801171038-322a19404e37/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/openshift/generic-admission-server v1.14.0/go.mod h1:GD9KN/W4KxqRQGVMbqQHpHzb2XcQVvLCaBaSciqXvfM= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= @@ -401,6 +407,7 @@ github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY= @@ -497,6 +504,7 @@ k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb h1:ZUNsbuPdXWrj0rZziRfCWc k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 h1:+FvAOv/4JyYgZanQI8h+UW9FCmLzyEz7EZunuET6p5g= k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682/go.mod h1:Idob8Va6/sMX5SmwPLsU0pdvFlkwxuJ5x+fXMG8NbKE= +k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 h1:07mhG/2oEoo3N+sHVOo0L9PJ/qvbk3N5n2dj8IWefnQ= k8s.io/client-go v0.0.0-20191114101535-6c5935290e33/go.mod h1:4L/zQOBkEf4pArQJ+CMk1/5xjA30B5oyWv+Bzb44DOw= k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 h1:NMYlxaF7rYQJk2E2IyrUhaX81zX24+dmoZdkPw0gJqI= @@ -509,6 +517,9 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kubectl v0.17.3 h1:9HHYj07kuFkM+sMJMOyQX29CKWq4lvKAG1UIPxNPMQ4= +k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28= +k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= kubesphere.io/application v0.0.0-20190404151855-67ae7f915d4e/go.mod h1:NhUQ0ZUdFz8NTQ+SvQG0JUKAn+q71v3TPExjsjRPIZI= @@ -529,9 +540,13 @@ sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9 sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/kubefed v0.2.0-alpha.1 h1:nzaQ4HDReHLECXMv7iszHBLx3+GO3/Iwlw7dkS71qCw= +sigs.k8s.io/kubefed v0.2.0-alpha.1/go.mod h1:/X4yMEvaclI6CAeVwFBjtGJ1E3gwXcuVwNbGPXPz+CM= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca h1:6dsH6AYQWbyZmtttJNe8Gq1cXOeS1BdV3eW37zHilAQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +vbom.ml/util 
v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/pkg/apis/cluster/v1alpha1/agent_types.go b/pkg/apis/cluster/v1alpha1/agent_types.go deleted file mode 100644 index 82f407bbe..000000000 --- a/pkg/apis/cluster/v1alpha1/agent_types.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - ResourceKindAgent = "Agent" - ResourcesSingularAgent = "agent" - ResourcesPluralAgent = "agents" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// AgentSpec defines the desired state of Agent -type AgentSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - // Token used by agents to connect to proxy. 
- // +optional - Token string `json:"token,omitempty"` - - // Proxy address - // +optional - Proxy string `json:"proxy,omitempty"` - - // KubeAPIServerPort is the port which listens for forwarding kube-apiserver traffic - // +optional - KubernetesAPIServerPort uint16 `json:"kubernetesAPIServerPort,omitempty"` - - // KubeSphereAPIServerPort is the port which listens for forwarding kubesphere apigateway traffic - // +optional - KubeSphereAPIServerPort uint16 `json:"kubesphereAPIServerPort,omitempty"` - - // Indicates that the agent is paused. - // +optional - Paused bool `json:"paused,omitempty"` -} - -type AgentConditionType string - -const ( - // Agent is initialized, and waiting for establishing to a proxy server - AgentInitialized AgentConditionType = "Initialized" - - // Agent has successfully connected to proxy server - AgentConnected AgentConditionType = "Connected" -) - -type AgentCondition struct { - // Type of AgentCondition - Type AgentConditionType `json:"type,omitempty"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status"` - // The last time this condition was updated. - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // The reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - Message string `json:"message,omitempty"` -} - -// AgentStatus defines the observed state of Agent -type AgentStatus struct { - - // Represents the latest available observations of a agent's current state. 
- Conditions []AgentCondition `json:"conditions,omitempty"` - - // Represents the connection quality, in ms - Ping uint64 `json:"ping,omitempty"` - - // Issued new kubeconfig by proxy server - KubeConfig []byte `json:"kubeconfig,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// +genclient:nonNamespaced -// +kubebuilder:printcolumn:name="Paused",type="bool",JSONPath=".spec.Paused" -// +kubebuilder:resource:scope=Cluster - -// Agent is the Schema for the agents API -type Agent struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AgentSpec `json:"spec,omitempty"` - Status AgentStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AgentList contains a list of Agent -type AgentList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Agent `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Agent{}, &AgentList{}) -} diff --git a/pkg/apis/cluster/v1alpha1/cluster_types.go b/pkg/apis/cluster/v1alpha1/cluster_types.go index 859e678b9..44715d96e 100644 --- a/pkg/apis/cluster/v1alpha1/cluster_types.go +++ b/pkg/apis/cluster/v1alpha1/cluster_types.go @@ -11,20 +11,71 @@ const ( ResourcesPluralCluster = "clusters" IsHostCluster = "cluster.kubesphere.io/is-host-cluster" + // Description of which region the cluster been placed + ClusterRegion = "cluster.kubesphere.io/region" + // Name of the cluster group + ClusterGroup = "cluster.kubesphere.io/group" + + Finalizer = "finalizer.cluster.kubesphere.io" ) type ClusterSpec struct { // Join cluster as a kubefed cluster - // +optional - Federated bool `json:"federated,omitempty"` + JoinFederation bool `json:"joinFederation,omitempty"` // Desired state of the cluster - Active bool `json:"active,omitempty"` + Enable bool `json:"enable,omitempty"` // Provider of the cluster, this field is 
just for description - // +optional Provider string `json:"provider,omitempty"` + + // Connection holds info to connect to the member cluster + Connection Connection `json:"connection,omitempty"` +} + +type ConnectionType string + +const ( + ConnectionTypeDirect ConnectionType = "direct" + ConnectionTypeProxy ConnectionType = "proxy" +) + +type Connection struct { + + // type defines how host cluster will connect to host cluster + // ConnectionTypeDirect means direct connection, this requires + // kubeconfig and kubesphere apiserver endpoint provided + // ConnectionTypeProxy means using kubesphere proxy, no kubeconfig + // or kubesphere apiserver endpoint required + Type ConnectionType `json:"type,omitempty"` + + // KubeSphere API Server endpoint. Example: http://10.10.0.11:8080 + // Should provide this field explicitly if connection type is direct. + // Will be populated by ks-apiserver if connection type is proxy. + KubeSphereAPIEndpoint string `json:"kubesphereAPIEndpoint,omitempty"` + + // Kubernetes API Server endpoint. Example: https://10.10.0.1:6443 + // Should provide this field explicitly if connection type is direct. + // Will be populated by ks-apiserver if connection type is proxy. + KubernetesAPIEndpoint string `json:"kubernetesAPIEndpoint,omitempty"` + + // KubeConfig content used to connect to cluster api server + // Should provide this field explicitly if connection type is direct. + // Will be populated by ks-proxy if connection type is proxy. + KubeConfig []byte `json:"kubeconfig,omitempty"` + + // Token used by agents of member cluster to connect to host cluster proxy. + // This field is populated by apiserver only if connection type is proxy. + Token string `json:"token,omitempty"` + + // KubeAPIServerPort is the port which listens for forwarding kube-apiserver traffic + // Only applicable when connection type is proxy. 
+ KubernetesAPIServerPort uint16 `json:"kubernetesAPIServerPort,omitempty"` + + // KubeSphereAPIServerPort is the port which listens for forwarding kubesphere apigateway traffic + // Only applicable when connection type is proxy. + KubeSphereAPIServerPort uint16 `json:"kubesphereAPIServerPort,omitempty"` } type ClusterConditionType string @@ -38,6 +89,9 @@ const ( // Cluster has been one of federated clusters ClusterFederated ClusterConditionType = "Federated" + + // Cluster is all available for requests + ClusterReady ClusterConditionType = "Ready" ) type ClusterCondition struct { @@ -60,22 +114,29 @@ type ClusterStatus struct { // Represents the latest available observations of a cluster's current state. Conditions []ClusterCondition `json:"conditions,omitempty"` - // GitVersion of the kubernetes cluster, this field is set by cluster controller - // +optional + // GitVersion of the kubernetes cluster, this field is populated by cluster controller KubernetesVersion string `json:"kubernetesVersion,omitempty"` // Count of the kubernetes cluster nodes - // +optional + // This field may not reflect the instant status of the cluster. NodeCount int `json:"nodeCount,omitempty"` + + // Zones are the names of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // +optional + Zones []string `json:"zones,omitempty"` + + // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. 
+ // +optional + Region *string `json:"region,omitempty"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true // +genclient:nonNamespaced -// +kubebuilder:printcolumn:name="Federated",type="boolean",JSONPath=".spec.federated" +// +kubebuilder:printcolumn:name="Federated",type="boolean",JSONPath=".spec.joinFederation" // +kubebuilder:printcolumn:name="Provider",type="string",JSONPath=".spec.provider" -// +kubebuilder:printcolumn:name="Active",type="boolean",JSONPath=".spec.active" +// +kubebuilder:printcolumn:name="Active",type="boolean",JSONPath=".spec.enable" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.kubernetesVersion" // +kubebuilder:resource:scope=Cluster diff --git a/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go index 4df23c853..a5d35bd86 100644 --- a/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 @@ -24,137 +24,13 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Agent) DeepCopyInto(out *Agent) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Agent. -func (in *Agent) DeepCopy() *Agent { - if in == nil { - return nil - } - out := new(Agent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Agent) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentCondition) DeepCopyInto(out *AgentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentCondition. -func (in *AgentCondition) DeepCopy() *AgentCondition { - if in == nil { - return nil - } - out := new(AgentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentList) DeepCopyInto(out *AgentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Agent, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentList. -func (in *AgentList) DeepCopy() *AgentList { - if in == nil { - return nil - } - out := new(AgentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AgentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentSpec) DeepCopyInto(out *AgentSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentSpec. 
-func (in *AgentSpec) DeepCopy() *AgentSpec { - if in == nil { - return nil - } - out := new(AgentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AgentStatus) DeepCopyInto(out *AgentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]AgentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.KubeConfig != nil { - in, out := &in.KubeConfig, &out.KubeConfig - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentStatus. -func (in *AgentStatus) DeepCopy() *AgentStatus { - if in == nil { - return nil - } - out := new(AgentStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. @@ -180,7 +56,6 @@ func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { *out = *in in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. @@ -205,7 +80,6 @@ func (in *ClusterList) DeepCopyInto(out *ClusterList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. 
@@ -229,7 +103,7 @@ func (in *ClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = *in - return + in.Connection.DeepCopyInto(&out.Connection) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. @@ -252,7 +126,16 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. @@ -264,3 +147,23 @@ func (in *ClusterStatus) DeepCopy() *ClusterStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + if in.KubeConfig != nil { + in, out := &in.KubeConfig, &out.KubeConfig + *out = make([]byte, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/devops/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/devops/v1alpha1/zz_generated.deepcopy.go index d0bb2e679..c1c383403 100644 --- a/pkg/apis/devops/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/devops/v1alpha1/zz_generated.deepcopy.go @@ -16,12 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. 
+// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -33,7 +33,6 @@ func (in *AuthConfig) DeepCopyInto(out *AuthConfig) { *out = new(v1.LocalObjectReference) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthConfig. @@ -49,7 +48,6 @@ func (in *AuthConfig) DeepCopy() *AuthConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CGroupLimits) DeepCopyInto(out *CGroupLimits) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CGroupLimits. @@ -77,7 +75,6 @@ func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig. @@ -103,7 +100,6 @@ func (in *ContainerInfo) DeepCopyInto(out *ContainerInfo) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerInfo. @@ -119,7 +115,6 @@ func (in *ContainerInfo) DeepCopy() *ContainerInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. @@ -135,7 +130,6 @@ func (in *DockerConfig) DeepCopy() *DockerConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DockerConfigEntry) DeepCopyInto(out *DockerConfigEntry) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigEntry. @@ -158,7 +152,6 @@ func (in *DockerConfigJson) DeepCopyInto(out *DockerConfigJson) { (*out)[key] = val } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfigJson. @@ -179,7 +172,6 @@ func (in DockerConfigMap) DeepCopyInto(out *DockerConfigMap) { for key, val := range *in { (*out)[key] = val } - return } } @@ -196,7 +188,6 @@ func (in DockerConfigMap) DeepCopy() DockerConfigMap { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EnvironmentSpec) DeepCopyInto(out *EnvironmentSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentSpec. @@ -217,7 +208,6 @@ func (in *Parameter) DeepCopyInto(out *Parameter) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. @@ -233,7 +223,6 @@ func (in *Parameter) DeepCopy() *Parameter { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. @@ -259,7 +248,6 @@ func (in *S2iAutoScale) DeepCopyInto(out *S2iAutoScale) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iAutoScale. 
@@ -279,7 +267,6 @@ func (in *S2iBinary) DeepCopyInto(out *S2iBinary) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinary. @@ -312,7 +299,6 @@ func (in *S2iBinaryList) DeepCopyInto(out *S2iBinaryList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinaryList. @@ -340,7 +326,6 @@ func (in *S2iBinarySpec) DeepCopyInto(out *S2iBinarySpec) { in, out := &in.UploadTimeStamp, &out.UploadTimeStamp *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinarySpec. @@ -356,7 +341,6 @@ func (in *S2iBinarySpec) DeepCopy() *S2iBinarySpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S2iBinaryStatus) DeepCopyInto(out *S2iBinaryStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBinaryStatus. @@ -377,7 +361,6 @@ func (in *S2iBuildResult) DeepCopyInto(out *S2iBuildResult) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuildResult. @@ -393,7 +376,6 @@ func (in *S2iBuildResult) DeepCopy() *S2iBuildResult { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S2iBuildSource) DeepCopyInto(out *S2iBuildSource) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuildSource. 
@@ -413,7 +395,6 @@ func (in *S2iBuilder) DeepCopyInto(out *S2iBuilder) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilder. @@ -446,7 +427,6 @@ func (in *S2iBuilderList) DeepCopyInto(out *S2iBuilderList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderList. @@ -480,7 +460,6 @@ func (in *S2iBuilderSpec) DeepCopyInto(out *S2iBuilderSpec) { *out = new(UserDefineTemplate) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderSpec. @@ -505,7 +484,6 @@ func (in *S2iBuilderStatus) DeepCopyInto(out *S2iBuilderStatus) { in, out := &in.LastRunStartTime, &out.LastRunStartTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderStatus. @@ -525,7 +503,6 @@ func (in *S2iBuilderTemplate) DeepCopyInto(out *S2iBuilderTemplate) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplate. @@ -558,7 +535,6 @@ func (in *S2iBuilderTemplateList) DeepCopyInto(out *S2iBuilderTemplateList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateList. @@ -596,7 +572,6 @@ func (in *S2iBuilderTemplateSpec) DeepCopyInto(out *S2iBuilderTemplateSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateSpec. 
@@ -612,7 +587,6 @@ func (in *S2iBuilderTemplateSpec) DeepCopy() *S2iBuilderTemplateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S2iBuilderTemplateStatus) DeepCopyInto(out *S2iBuilderTemplateStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iBuilderTemplateStatus. @@ -715,7 +689,6 @@ func (in *S2iConfig) DeepCopyInto(out *S2iConfig) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iConfig. @@ -735,7 +708,6 @@ func (in *S2iRun) DeepCopyInto(out *S2iRun) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRun. @@ -768,7 +740,6 @@ func (in *S2iRunList) DeepCopyInto(out *S2iRunList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunList. @@ -792,7 +763,6 @@ func (in *S2iRunList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S2iRunSpec) DeepCopyInto(out *S2iRunSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunSpec. @@ -826,7 +796,6 @@ func (in *S2iRunStatus) DeepCopyInto(out *S2iRunStatus) { *out = new(S2iBuildSource) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S2iRunStatus. @@ -849,7 +818,6 @@ func (in *UserDefineTemplate) DeepCopyInto(out *UserDefineTemplate) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefineTemplate. 
@@ -865,7 +833,6 @@ func (in *UserDefineTemplate) DeepCopy() *UserDefineTemplate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec. diff --git a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go index ba1dbd06e..8ac0e86bc 100644 --- a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha2 @@ -274,7 +274,6 @@ func (in *User) DeepCopyInto(out *User) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. @@ -299,7 +298,6 @@ func (in *User) DeepCopyObject() runtime.Object { func (in *UserCondition) DeepCopyInto(out *UserCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserCondition. @@ -349,7 +347,6 @@ func (in *UserList) DeepCopyInto(out *UserList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. @@ -383,7 +380,6 @@ func (in *UserSpec) DeepCopyInto(out *UserSpec) { *out = make([]FinalizerName, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. 
@@ -406,7 +402,6 @@ func (in *UserStatus) DeepCopyInto(out *UserStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. diff --git a/pkg/apis/network/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/network/v1alpha1/zz_generated.deepcopy.go index ae35ee8ab..71e01062f 100644 --- a/pkg/apis/network/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/network/v1alpha1/zz_generated.deepcopy.go @@ -16,16 +16,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - numorstring "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring" + "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1/numorstring" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -56,7 +56,6 @@ func (in *EntityRule) DeepCopyInto(out *EntityRule) { *out = new(ServiceAccountMatch) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntityRule. @@ -82,7 +81,6 @@ func (in *HTTPMatch) DeepCopyInto(out *HTTPMatch) { *out = make([]HTTPPath, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatch. @@ -98,7 +96,6 @@ func (in *HTTPMatch) DeepCopy() *HTTPMatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPPath) DeepCopyInto(out *HTTPPath) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPath. 
@@ -124,7 +121,6 @@ func (in *ICMPFields) DeepCopyInto(out *ICMPFields) { *out = new(int) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPFields. @@ -143,7 +139,6 @@ func (in *NamespaceNetworkPolicy) DeepCopyInto(out *NamespaceNetworkPolicy) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicy. @@ -176,7 +171,6 @@ func (in *NamespaceNetworkPolicyList) DeepCopyInto(out *NamespaceNetworkPolicyLi (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicyList. @@ -224,7 +218,6 @@ func (in *NamespaceNetworkPolicySpec) DeepCopyInto(out *NamespaceNetworkPolicySp *out = make([]PolicyType, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceNetworkPolicySpec. @@ -272,7 +265,6 @@ func (in *Rule) DeepCopyInto(out *Rule) { *out = new(HTTPMatch) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. @@ -293,7 +285,6 @@ func (in *ServiceAccountMatch) DeepCopyInto(out *ServiceAccountMatch) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountMatch. @@ -313,7 +304,6 @@ func (in *WorkspaceNetworkPolicy) DeepCopyInto(out *WorkspaceNetworkPolicy) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicy. 
@@ -351,7 +341,6 @@ func (in *WorkspaceNetworkPolicyEgressRule) DeepCopyInto(out *WorkspaceNetworkPo (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyEgressRule. @@ -381,7 +370,6 @@ func (in *WorkspaceNetworkPolicyIngressRule) DeepCopyInto(out *WorkspaceNetworkP (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyIngressRule. @@ -406,7 +394,6 @@ func (in *WorkspaceNetworkPolicyList) DeepCopyInto(out *WorkspaceNetworkPolicyLi (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyList. @@ -436,7 +423,6 @@ func (in *WorkspaceNetworkPolicyPeer) DeepCopyInto(out *WorkspaceNetworkPolicyPe *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyPeer. @@ -471,7 +457,6 @@ func (in *WorkspaceNetworkPolicySpec) DeepCopyInto(out *WorkspaceNetworkPolicySp (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicySpec. @@ -487,7 +472,6 @@ func (in *WorkspaceNetworkPolicySpec) DeepCopy() *WorkspaceNetworkPolicySpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkspaceNetworkPolicyStatus) DeepCopyInto(out *WorkspaceNetworkPolicyStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceNetworkPolicyStatus. 
diff --git a/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go index 23e36c5f8..c5a810aa5 100644 --- a/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/servicemesh/v1alpha2/zz_generated.deepcopy.go @@ -16,12 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha2 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,7 +30,6 @@ func (in *DestinationRuleSpecTemplate) DeepCopyInto(out *DestinationRuleSpecTemp *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleSpecTemplate. @@ -50,7 +49,6 @@ func (in *ServicePolicy) DeepCopyInto(out *ServicePolicy) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicy. @@ -76,7 +74,6 @@ func (in *ServicePolicyCondition) DeepCopyInto(out *ServicePolicyCondition) { *out = *in in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyCondition. @@ -101,7 +98,6 @@ func (in *ServicePolicyList) DeepCopyInto(out *ServicePolicyList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyList. 
@@ -131,7 +127,6 @@ func (in *ServicePolicySpec) DeepCopyInto(out *ServicePolicySpec) { (*in).DeepCopyInto(*out) } in.Template.DeepCopyInto(&out.Template) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicySpec. @@ -162,7 +157,6 @@ func (in *ServicePolicyStatus) DeepCopyInto(out *ServicePolicyStatus) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePolicyStatus. @@ -182,7 +176,6 @@ func (in *Strategy) DeepCopyInto(out *Strategy) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Strategy. @@ -208,7 +201,6 @@ func (in *StrategyCondition) DeepCopyInto(out *StrategyCondition) { *out = *in in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyCondition. @@ -233,7 +225,6 @@ func (in *StrategyList) DeepCopyInto(out *StrategyList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList. @@ -263,7 +254,6 @@ func (in *StrategySpec) DeepCopyInto(out *StrategySpec) { (*in).DeepCopyInto(*out) } in.Template.DeepCopyInto(&out.Template) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategySpec. @@ -294,7 +284,6 @@ func (in *StrategyStatus) DeepCopyInto(out *StrategyStatus) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyStatus. 
@@ -312,7 +301,6 @@ func (in *VirtualServiceTemplateSpec) DeepCopyInto(out *VirtualServiceTemplateSp *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceTemplateSpec. diff --git a/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go index b50a8d568..a9319670d 100644 --- a/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha1 @@ -31,7 +31,6 @@ func (in *Workspace) DeepCopyInto(out *Workspace) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. @@ -64,7 +63,6 @@ func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. @@ -88,7 +86,6 @@ func (in *WorkspaceList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. @@ -104,7 +101,6 @@ func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index ea44d915b..f71f7ce58 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -94,7 +94,7 @@ type APIServer struct { // monitoring client set MonitoringClient monitoring.Interface - // + // openpitrix client OpenpitrixClient openpitrix.Client // @@ -188,8 +188,10 @@ func (s *APIServer) buildHandlerChain() { handler := s.Server.Handler handler = filters.WithKubeAPIServer(handler, s.KubernetesClient.Config(), &errorResponder{}) - clusterDispatcher := dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Agents().Lister(), s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Lister()) - handler = filters.WithMultipleClusterDispatcher(handler, clusterDispatcher) + if s.Config.MultiClusterOptions.Enable { + clusterDispatcher := dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters().Lister()) + handler = filters.WithMultipleClusterDispatcher(handler, clusterDispatcher) + } excludedPaths := []string{"/oauth/*", "/kapis/config.kubesphere.io/*"} pathAuthorizer, _ := path.NewAuthorizer(excludedPaths) @@ -284,6 +286,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error { {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "roles"}, {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "rolebindings"}, {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "policyrules"}, + {Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "clusters"}, } devopsGVRs := []schema.GroupVersionResource{ @@ -332,7 +335,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error { if !isResourceExists(gvr) { 
klog.Warningf("resource %s not exists in the cluster", gvr) } else { - _, err := appInformerFactory.ForResource(gvr) + _, err = appInformerFactory.ForResource(gvr) if err != nil { return err } diff --git a/pkg/apiserver/config/config.go b/pkg/apiserver/config/config.go index a9c4bd48a..1346699e3 100644 --- a/pkg/apiserver/config/config.go +++ b/pkg/apiserver/config/config.go @@ -11,6 +11,7 @@ import ( "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch" "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" + "kubesphere.io/kubesphere/pkg/simple/client/multicluster" "kubesphere.io/kubesphere/pkg/simple/client/network" "kubesphere.io/kubesphere/pkg/simple/client/notification" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" @@ -63,13 +64,14 @@ type Config struct { KubernetesOptions *k8s.KubernetesOptions `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty" mapstructure:"kubernetes"` ServiceMeshOptions *servicemesh.Options `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"` NetworkOptions *network.Options `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"` - LdapOptions *ldap.Options `json:"ldap,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"` - RedisOptions *cache.Options `json:"redis,omitempty" yaml:"redis,omitempty" mapstructure:"redis"` + LdapOptions *ldap.Options `json:"-" yaml:"ldap,omitempty" mapstructure:"ldap"` + RedisOptions *cache.Options `json:"-" yaml:"redis,omitempty" mapstructure:"redis"` S3Options *s3.Options `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"` OpenPitrixOptions *openpitrix.Options `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"` MonitoringOptions *prometheus.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"` LoggingOptions *elasticsearch.Options `json:"logging,omitempty" yaml:"logging,omitempty" 
mapstructure:"logging"` - AuthenticationOptions *authoptions.AuthenticationOptions `json:"authentication,omitempty" yaml:"authentication,omitempty" mapstructure:"authentication"` + AuthenticationOptions *authoptions.AuthenticationOptions `json:"-" yaml:"authentication,omitempty" mapstructure:"authentication"` + MultiClusterOptions *multicluster.Options `json:"multicluster,omitempty" yaml:"multicluster,omitempty" mapstructure:"multicluster"` // Options used for enabling components, not actually used now. Once we switch Alerting/Notification API to kubesphere, // we can add these options to kubesphere command lines AlertingOptions *alerting.Options `json:"alerting,omitempty" yaml:"alerting,omitempty" mapstructure:"alerting"` @@ -204,4 +206,8 @@ func (conf *Config) stripEmptyOptions() { conf.NotificationOptions = nil } + if conf.MultiClusterOptions != nil && !conf.MultiClusterOptions.Enable { + conf.MultiClusterOptions = nil + } + } diff --git a/pkg/apiserver/config/config_test.go b/pkg/apiserver/config/config_test.go index 82f94a944..376d7658f 100644 --- a/pkg/apiserver/config/config_test.go +++ b/pkg/apiserver/config/config_test.go @@ -14,6 +14,7 @@ import ( "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch" "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" + "kubesphere.io/kubesphere/pkg/simple/client/multicluster" "kubesphere.io/kubesphere/pkg/simple/client/network" "kubesphere.io/kubesphere/pkg/simple/client/notification" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" @@ -118,6 +119,9 @@ func newTestConfig() (*Config, error) { AccessTokenInactivityTimeout: 0, }, }, + MultiClusterOptions: &multicluster.Options{ + Enable: false, + }, } return conf, nil } diff --git a/pkg/apiserver/dispatch/dispatch.go b/pkg/apiserver/dispatch/dispatch.go index 9c8e1cbc9..b90bbd476 100644 --- a/pkg/apiserver/dispatch/dispatch.go +++ b/pkg/apiserver/dispatch/dispatch.go @@ -11,6 +11,7 @@ 
import ( "kubesphere.io/kubesphere/pkg/apiserver/request" "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" "net/http" + "net/url" "strings" ) @@ -20,13 +21,11 @@ type Dispatcher interface { } type clusterDispatch struct { - agentLister v1alpha1.AgentLister clusterLister v1alpha1.ClusterLister } -func NewClusterDispatch(agentLister v1alpha1.AgentLister, clusterLister v1alpha1.ClusterLister) Dispatcher { +func NewClusterDispatch(clusterLister v1alpha1.ClusterLister) Dispatcher { return &clusterDispatch{ - agentLister: agentLister, clusterLister: clusterLister, } } @@ -58,23 +57,19 @@ func (c *clusterDispatch) Dispatch(w http.ResponseWriter, req *http.Request, han return } - agent, err := c.agentLister.Get(info.Cluster) - if err != nil { - if errors.IsNotFound(err) { - http.Error(w, fmt.Sprintf("cluster %s not found", info.Cluster), http.StatusNotFound) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - return - } - - if !isAgentReady(agent) { + if !isClusterReady(cluster) { http.Error(w, fmt.Sprintf("cluster agent is not ready"), http.StatusInternalServerError) return } + endpoint, err := url.Parse(cluster.Spec.Connection.KubeSphereAPIEndpoint) + if err != nil { + klog.Error(err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } + u := *req.URL - u.Host = agent.Spec.Proxy + u.Host = endpoint.Host u.Path = strings.Replace(u.Path, fmt.Sprintf("/clusters/%s", info.Cluster), "", 1) httpProxy := proxy.NewUpgradeAwareHandler(&u, http.DefaultTransport, true, false, c) @@ -85,9 +80,9 @@ func (c *clusterDispatch) Error(w http.ResponseWriter, req *http.Request, err er responsewriters.InternalError(w, req, err) } -func isAgentReady(agent *clusterv1alpha1.Agent) bool { - for _, condition := range agent.Status.Conditions { - if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == corev1.ConditionTrue { +func isClusterReady(cluster *clusterv1alpha1.Cluster) bool { + for _, condition := range 
cluster.Status.Conditions { + if condition.Type == clusterv1alpha1.ClusterReady && condition.Status == corev1.ConditionTrue { return true } } @@ -95,7 +90,6 @@ func isAgentReady(agent *clusterv1alpha1.Agent) bool { return false } -// func isClusterHostCluster(cluster *clusterv1alpha1.Cluster) bool { for key, value := range cluster.Annotations { if key == clusterv1alpha1.IsHostCluster && value == "true" { diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go deleted file mode 100644 index 0fa113684..000000000 --- a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2019 The KubeSphere authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" - scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" -) - -// AgentsGetter has a method to return a AgentInterface. -// A group's client should implement this interface. -type AgentsGetter interface { - Agents() AgentInterface -} - -// AgentInterface has methods to work with Agent resources. 
-type AgentInterface interface { - Create(*v1alpha1.Agent) (*v1alpha1.Agent, error) - Update(*v1alpha1.Agent) (*v1alpha1.Agent, error) - UpdateStatus(*v1alpha1.Agent) (*v1alpha1.Agent, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Agent, error) - List(opts v1.ListOptions) (*v1alpha1.AgentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) - AgentExpansion -} - -// agents implements AgentInterface -type agents struct { - client rest.Interface -} - -// newAgents returns a Agents -func newAgents(c *ClusterV1alpha1Client) *agents { - return &agents{ - client: c.RESTClient(), - } -} - -// Get takes name of the agent, and returns the corresponding agent object, and an error if there is any. -func (c *agents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) { - result = &v1alpha1.Agent{} - err = c.client.Get(). - Resource("agents"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Agents that match those selectors. -func (c *agents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.AgentList{} - err = c.client.Get(). - Resource("agents"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested agents. 
-func (c *agents) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("agents"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a agent and creates it. Returns the server's representation of the agent, and an error, if there is any. -func (c *agents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { - result = &v1alpha1.Agent{} - err = c.client.Post(). - Resource("agents"). - Body(agent). - Do(). - Into(result) - return -} - -// Update takes the representation of a agent and updates it. Returns the server's representation of the agent, and an error, if there is any. -func (c *agents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { - result = &v1alpha1.Agent{} - err = c.client.Put(). - Resource("agents"). - Name(agent.Name). - Body(agent). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *agents) UpdateStatus(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { - result = &v1alpha1.Agent{} - err = c.client.Put(). - Resource("agents"). - Name(agent.Name). - SubResource("status"). - Body(agent). - Do(). - Into(result) - return -} - -// Delete takes name of the agent and deletes it. Returns an error if one occurs. -func (c *agents) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("agents"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *agents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("agents"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched agent. -func (c *agents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) { - result = &v1alpha1.Agent{} - err = c.client.Patch(pt). - Resource("agents"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go index 12b7dd159..fef8f441c 100644 --- a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go @@ -26,7 +26,6 @@ import ( type ClusterV1alpha1Interface interface { RESTClient() rest.Interface - AgentsGetter ClustersGetter } @@ -35,10 +34,6 @@ type ClusterV1alpha1Client struct { restClient rest.Interface } -func (c *ClusterV1alpha1Client) Agents() AgentInterface { - return newAgents(c) -} - func (c *ClusterV1alpha1Client) Clusters() ClusterInterface { return newClusters(c) } diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go deleted file mode 100644 index e08a54c4d..000000000 --- a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2019 The KubeSphere authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" -) - -// FakeAgents implements AgentInterface -type FakeAgents struct { - Fake *FakeClusterV1alpha1 -} - -var agentsResource = schema.GroupVersionResource{Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "agents"} - -var agentsKind = schema.GroupVersionKind{Group: "cluster.kubesphere.io", Version: "v1alpha1", Kind: "Agent"} - -// Get takes name of the agent, and returns the corresponding agent object, and an error if there is any. -func (c *FakeAgents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(agentsResource, name), &v1alpha1.Agent{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Agent), err -} - -// List takes label and field selectors, and returns the list of Agents that match those selectors. -func (c *FakeAgents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(agentsResource, agentsKind, opts), &v1alpha1.AgentList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.AgentList{ListMeta: obj.(*v1alpha1.AgentList).ListMeta} - for _, item := range obj.(*v1alpha1.AgentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested agents. -func (c *FakeAgents) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(agentsResource, opts)) -} - -// Create takes the representation of a agent and creates it. Returns the server's representation of the agent, and an error, if there is any. -func (c *FakeAgents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(agentsResource, agent), &v1alpha1.Agent{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Agent), err -} - -// Update takes the representation of a agent and updates it. Returns the server's representation of the agent, and an error, if there is any. -func (c *FakeAgents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(agentsResource, agent), &v1alpha1.Agent{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Agent), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeAgents) UpdateStatus(agent *v1alpha1.Agent) (*v1alpha1.Agent, error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(agentsResource, "status", agent), &v1alpha1.Agent{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Agent), err -} - -// Delete takes name of the agent and deletes it. Returns an error if one occurs. -func (c *FakeAgents) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(agentsResource, name), &v1alpha1.Agent{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeAgents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(agentsResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.AgentList{}) - return err -} - -// Patch applies the patch and returns the patched agent. -func (c *FakeAgents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(agentsResource, name, pt, data, subresources...), &v1alpha1.Agent{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Agent), err -} diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go index bea6f6387..8168d68fb 100644 --- a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go @@ -28,10 +28,6 @@ type FakeClusterV1alpha1 struct { *testing.Fake } -func (c *FakeClusterV1alpha1) Agents() v1alpha1.AgentInterface { - return &FakeAgents{c} -} - func (c *FakeClusterV1alpha1) Clusters() v1alpha1.ClusterInterface { return &FakeClusters{c} } diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go 
b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go index d87ae7ed2..8a541985e 100644 --- a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go @@ -18,6 +18,4 @@ limitations under the License. package v1alpha1 -type AgentExpansion interface{} - type ClusterExpansion interface{} diff --git a/pkg/client/informers/externalversions/cluster/v1alpha1/agent.go b/pkg/client/informers/externalversions/cluster/v1alpha1/agent.go deleted file mode 100644 index 5db92dcf3..000000000 --- a/pkg/client/informers/externalversions/cluster/v1alpha1/agent.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2019 The KubeSphere authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" - versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" - internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" -) - -// AgentInformer provides access to a shared informer and lister for -// Agents. 
-type AgentInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.AgentLister -} - -type agentInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewAgentInformer constructs a new informer for Agent type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredAgentInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredAgentInformer constructs a new informer for Agent type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Agents().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Agents().Watch(options) - }, - }, - &clusterv1alpha1.Agent{}, - resyncPeriod, - indexers, - ) -} - -func (f *agentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredAgentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *agentInformer) Informer() 
cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Agent{}, f.defaultInformer) -} - -func (f *agentInformer) Lister() v1alpha1.AgentLister { - return v1alpha1.NewAgentLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go b/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go index 48aa93887..383f79e85 100644 --- a/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go @@ -24,8 +24,6 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // Agents returns a AgentInformer. - Agents() AgentInformer // Clusters returns a ClusterInformer. Clusters() ClusterInformer } @@ -41,11 +39,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// Agents returns a AgentInformer. -func (v *version) Agents() AgentInformer { - return &agentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // Clusters returns a ClusterInformer. 
func (v *version) Clusters() ClusterInformer { return &clusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 46796b88f..34fb32f12 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -59,8 +59,6 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=cluster.kubesphere.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("agents"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Agents().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("clusters"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Clusters().Informer()}, nil diff --git a/pkg/client/listers/cluster/v1alpha1/agent.go b/pkg/client/listers/cluster/v1alpha1/agent.go deleted file mode 100644 index 55326bbee..000000000 --- a/pkg/client/listers/cluster/v1alpha1/agent.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2019 The KubeSphere authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" -) - -// AgentLister helps list Agents. -type AgentLister interface { - // List lists all Agents in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) - // Get retrieves the Agent from the index for a given name. - Get(name string) (*v1alpha1.Agent, error) - AgentListerExpansion -} - -// agentLister implements the AgentLister interface. -type agentLister struct { - indexer cache.Indexer -} - -// NewAgentLister returns a new AgentLister. -func NewAgentLister(indexer cache.Indexer) AgentLister { - return &agentLister{indexer: indexer} -} - -// List lists all Agents in the indexer. -func (s *agentLister) List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Agent)) - }) - return ret, err -} - -// Get retrieves the Agent from the index for a given name. -func (s *agentLister) Get(name string) (*v1alpha1.Agent, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("agent"), name) - } - return obj.(*v1alpha1.Agent), nil -} diff --git a/pkg/client/listers/cluster/v1alpha1/expansion_generated.go b/pkg/client/listers/cluster/v1alpha1/expansion_generated.go index 0fc0286f9..2711d0848 100644 --- a/pkg/client/listers/cluster/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/cluster/v1alpha1/expansion_generated.go @@ -18,10 +18,6 @@ limitations under the License. package v1alpha1 -// AgentListerExpansion allows custom methods to be added to -// AgentLister. -type AgentListerExpansion interface{} - // ClusterListerExpansion allows custom methods to be added to // ClusterLister. 
type ClusterListerExpansion interface{} diff --git a/pkg/controller/cluster/cluster_controller.go b/pkg/controller/cluster/cluster_controller.go index c46a7926e..3d0e7044f 100644 --- a/pkg/controller/cluster/cluster_controller.go +++ b/pkg/controller/cluster/cluster_controller.go @@ -3,23 +3,30 @@ package cluster import ( "fmt" v1 "k8s.io/api/core/v1" + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog" clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" clusterclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1" clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1" clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" + "math/rand" + "reflect" + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" "time" ) @@ -30,17 +37,31 @@ const ( // // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s maxRetries = 15 + + kubefedNamespace = "kube-federation-system" + + hostClusterName = "kubesphere" + + // allocate kubernetesAPIServer port in range [portRangeMin, portRangeMax] for agents if port is not specified + // kubesphereAPIServer port is defaulted to kubernetesAPIServerPort + 10000 + portRangeMin = 6000 + portRangeMax = 7000 + + // Service port 
+ kubernetesPort = 6443 + kubespherePort = 80 + + defaultAgentNamespace = "kubesphere-system" ) type ClusterController struct { eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder - agentClient clusterclient.AgentInterface - clusterClient clusterclient.ClusterInterface + client kubernetes.Interface + hostConfig *rest.Config - agentLister clusterlister.AgentLister - agentHasSynced cache.InformerSynced + clusterClient clusterclient.ClusterInterface clusterLister clusterlister.ClusterLister clusterHasSynced cache.InformerSynced @@ -52,9 +73,8 @@ type ClusterController struct { func NewClusterController( client kubernetes.Interface, + config *rest.Config, clusterInformer clusterinformer.ClusterInformer, - agentInformer clusterinformer.AgentInformer, - agentClient clusterclient.AgentInterface, clusterClient clusterclient.ClusterInterface, ) *ClusterController { @@ -62,38 +82,35 @@ func NewClusterController( broadcaster.StartLogging(func(format string, args ...interface{}) { klog.Info(fmt.Sprintf(format, args)) }) - broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) + broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"}) c := &ClusterController{ eventBroadcaster: broadcaster, eventRecorder: recorder, - agentClient: agentClient, + client: client, + hostConfig: config, clusterClient: clusterClient, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster"), workerLoopPeriod: time.Second, } - c.agentLister = agentInformer.Lister() - c.agentHasSynced = agentInformer.Informer().HasSynced - c.clusterLister = clusterInformer.Lister() c.clusterHasSynced = clusterInformer.Informer().HasSynced clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.addCluster, UpdateFunc: func(oldObj, newObj interface{}) { + 
newCluster := newObj.(*clusterv1alpha1.Cluster) + oldCluster := oldObj.(*clusterv1alpha1.Cluster) + if newCluster.ResourceVersion == oldCluster.ResourceVersion { + return + } c.addCluster(newObj) }, DeleteFunc: c.addCluster, }) - agentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: nil, - UpdateFunc: nil, - DeleteFunc: nil, - }) - return c } @@ -108,7 +125,7 @@ func (c *ClusterController) Run(workers int, stopCh <-chan struct{}) error { klog.V(0).Info("starting cluster controller") defer klog.Info("shutting down cluster controller") - if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced, c.agentHasSynced) { + if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced) { return fmt.Errorf("failed to wait for caches to sync") } @@ -156,87 +173,211 @@ func (c *ClusterController) syncCluster(key string) error { // cluster not found, possibly been deleted // need to do the cleanup if errors.IsNotFound(err) { - _, err = c.agentLister.Get(name) - if err != nil && errors.IsNotFound(err) { - return nil - } - - if err != nil { - klog.Errorf("Failed to get cluster agent %s, %#v", name, err) - return err - } - - // do the real cleanup work - err = c.agentClient.Delete(name, &metav1.DeleteOptions{}) - return err + return nil } klog.Errorf("Failed to get cluster with name %s, %#v", name, err) return err } - newAgent := &clusterv1alpha1.Agent{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "app.kubernetes.io/name": "tower", - "cluster.kubesphere.io/name": name, - }, - }, - Spec: clusterv1alpha1.AgentSpec{ - Token: "", - KubeSphereAPIServerPort: 0, - KubernetesAPIServerPort: 0, - Proxy: "", - Paused: !cluster.Spec.Active, - }, - } + // proxy service name if needed + serviceName := fmt.Sprintf("mc-%s", cluster.Name) - agent, err := c.agentLister.Get(name) - if err != nil && errors.IsNotFound(err) { - agent, err = c.agentClient.Create(newAgent) - if err != nil { - klog.Errorf("Failed to create agent %s, %#v", name, err) - 
return err + if cluster.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // registering our finalizer. + if !sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { + cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1alpha1.Finalizer) + if cluster, err = c.clusterClient.Update(cluster); err != nil { + return err + } } + } else { + // The object is being deleted + if sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { + // need to unJoin federation first, before there are + // some cleanup work to do in member cluster which depends + // agent to proxy traffic + err = c.unJoinFederation(nil, name) + if err != nil { + klog.Errorf("Failed to unjoin federation for cluster %s, error %v", name, err) + return err + } + _, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // nothing to do + } else { + klog.Errorf("Failed to get proxy service %s, error %v", serviceName, err) + return err + } + } else { + err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(serviceName, metav1.NewDeleteOptions(0)) + if err != nil { + klog.Errorf("Unable to delete service %s, error %v", serviceName, err) + return err + } + } + + finalizers := sets.NewString(cluster.ObjectMeta.Finalizers...) 
+ finalizers.Delete(clusterv1alpha1.Finalizer) + cluster.ObjectMeta.Finalizers = finalizers.List() + if _, err = c.clusterClient.Update(cluster); err != nil { + return err + } + } return nil } + oldCluster := cluster.DeepCopy() + + // prepare for proxy to member cluster + if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy { + if cluster.Spec.Connection.KubeSphereAPIServerPort == 0 || + cluster.Spec.Connection.KubernetesAPIServerPort == 0 { + port, err := c.allocatePort() + if err != nil { + klog.Error(err) + return err + } + + cluster.Spec.Connection.KubernetesAPIServerPort = port + cluster.Spec.Connection.KubeSphereAPIServerPort = port + 10000 + } + + // token uninitialized, generate a new token + if len(cluster.Spec.Connection.Token) == 0 { + cluster.Spec.Connection.Token = c.generateToken() + } + + mcService := v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: cluster.Namespace, + Labels: map[string]string{ + "app.kubernetes.io/name": serviceName, + "app": serviceName, + }, + }, + Spec: v1.ServiceSpec{ + Selector: map[string]string{ + "app.kubernetes.io/name": "tower", + "app": "tower", + }, + Ports: []v1.ServicePort{ + { + Name: "kubernetes", + Protocol: v1.ProtocolTCP, + Port: kubernetesPort, + TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubernetesAPIServerPort)), + }, + { + Name: "kubesphere", + Protocol: v1.ProtocolTCP, + Port: kubespherePort, + TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubeSphereAPIServerPort)), + }, + }, + }, + } + + service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(&mcService) + if err != nil { + return err + } + } + + return err + } else { + if !reflect.DeepEqual(service.Spec, mcService.Spec) { + mcService.ObjectMeta = service.ObjectMeta + mcService.Spec.ClusterIP = service.Spec.ClusterIP + + 
service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(&mcService) + if err != nil { + return err + } + } + } + + // populated the kubernetes apiEndpoint and kubesphere apiEndpoint + cluster.Spec.Connection.KubernetesAPIEndpoint = fmt.Sprintf("https://%s:%d", service.Spec.ClusterIP, kubernetesPort) + cluster.Spec.Connection.KubeSphereAPIEndpoint = fmt.Sprintf("http://%s:%d", service.Spec.ClusterIP, kubespherePort) + + if !reflect.DeepEqual(oldCluster.Spec, cluster.Spec) { + cluster, err = c.clusterClient.Update(cluster) + if err != nil { + klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err) + return err + } + return nil + } + } + + if len(cluster.Spec.Connection.KubeConfig) == 0 { + return nil + } + + var clientSet kubernetes.Interface + var clusterConfig *rest.Config + + // prepare for + clientConfig, err := clientcmd.NewClientConfigFromBytes(cluster.Spec.Connection.KubeConfig) if err != nil { - klog.Errorf("Failed to get agent %s, %#v", name, err) + klog.Errorf("Unable to create client config from kubeconfig bytes, %#v", err) return err } - if agent.Spec.Paused != newAgent.Spec.Paused { - agent.Spec.Paused = newAgent.Spec.Paused - return retry.RetryOnConflict(retry.DefaultBackoff, func() error { - _, err = c.agentClient.Update(agent) - return err - }) + clusterConfig, err = clientConfig.ClientConfig() + if err != nil { + klog.Errorf("Failed to get client config, %#v", err) + return err } - // agent connection is ready, update cluster status - // set - if len(agent.Status.KubeConfig) != 0 && c.isAgentReady(agent) { - clientConfig, err := clientcmd.NewClientConfigFromBytes(agent.Status.KubeConfig) + clientSet, err = kubernetes.NewForConfig(clusterConfig) + if err != nil { + klog.Errorf("Failed to create ClientSet from config, %#v", err) + return nil + } + + if !cluster.Spec.JoinFederation { // trying to unJoin federation + err = c.unJoinFederation(clusterConfig, cluster.Name) if err != nil { - klog.Errorf("Unable to create client 
config from kubeconfig bytes, %#v", err) + klog.Errorf("Failed to unJoin federation for cluster %s, error %v", cluster.Name, err) + c.eventRecorder.Event(cluster, v1.EventTypeWarning, "UnJoinFederation", err.Error()) return err } - - config, err := clientConfig.ClientConfig() + } else { // join federation + _, err = c.joinFederation(clusterConfig, cluster.Name, cluster.Labels) if err != nil { - klog.Errorf("Failed to get client config, %#v", err) + klog.Errorf("Failed to join federation for cluster %s, error %v", cluster.Name, err) + c.eventRecorder.Event(cluster, v1.EventTypeWarning, "JoinFederation", err.Error()) return err } + c.eventRecorder.Event(cluster, v1.EventTypeNormal, "JoinFederation", "Cluster has joined federation.") - clientSet, err := kubernetes.NewForConfig(config) - if err != nil { - klog.Errorf("Failed to create ClientSet from config, %#v", err) - return nil + federationReadyCondition := clusterv1alpha1.ClusterCondition{ + Type: clusterv1alpha1.ClusterFederated, + Status: v1.ConditionTrue, + LastUpdateTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "", + Message: "Cluster has joined federation control plane successfully", } + c.updateClusterCondition(cluster, federationReadyCondition) + } + + // cluster agent is ready, we can pull kubernetes cluster info through agent + // since there is no agent necessary for host cluster, so updates for host cluster + // is safe. 
+ if isConditionTrue(cluster, clusterv1alpha1.ClusterAgentAvailable) || + cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeDirect { version, err := clientSet.Discovery().ServerVersion() if err != nil { klog.Errorf("Failed to get kubernetes version, %#v", err) @@ -252,28 +393,25 @@ func (c *ClusterController) syncCluster(key string) error { } cluster.Status.NodeCount = len(nodes.Items) + + clusterReadyCondition := clusterv1alpha1.ClusterCondition{ + Type: clusterv1alpha1.ClusterReady, + Status: v1.ConditionTrue, + LastUpdateTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: string(clusterv1alpha1.ClusterReady), + Message: "Cluster is available now", + } + + c.updateClusterCondition(cluster, clusterReadyCondition) } - agentReadyCondition := clusterv1alpha1.ClusterCondition{ - Type: clusterv1alpha1.ClusterAgentAvailable, - LastUpdateTime: metav1.NewTime(time.Now()), - LastTransitionTime: metav1.NewTime(time.Now()), - Reason: "", - Message: "Cluster agent is available now.", - } - - if c.isAgentReady(agent) { - agentReadyCondition.Status = v1.ConditionTrue - } else { - agentReadyCondition.Status = v1.ConditionFalse - } - - c.addClusterCondition(cluster, agentReadyCondition) - - _, err = c.clusterClient.Update(cluster) - if err != nil { - klog.Errorf("Failed to update cluster status, %#v", err) - return err + if !reflect.DeepEqual(oldCluster, cluster) { + _, err = c.clusterClient.Update(cluster) + if err != nil { + klog.Errorf("Failed to update cluster status, %#v", err) + return err + } } return nil @@ -298,50 +436,126 @@ func (c *ClusterController) handleErr(err error, key interface{}) { } if c.queue.NumRequeues(key) < maxRetries { - klog.V(2).Infof("Error syncing virtualservice %s for service retrying, %#v", key, err) + klog.V(2).Infof("Error syncing cluster %s, retrying, %v", key, err) c.queue.AddRateLimited(key) return } - klog.V(4).Infof("Dropping service %s out of the queue.", key) + klog.V(4).Infof("Dropping cluster %s out of the 
queue.", key) c.queue.Forget(key) utilruntime.HandleError(err) } -func (c *ClusterController) addAgent(obj interface{}) { - agent := obj.(*clusterv1alpha1.Agent) - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("get agent key %s failed", agent.Name)) - return - } - - c.queue.Add(key) -} - -func (c *ClusterController) isAgentReady(agent *clusterv1alpha1.Agent) bool { - for _, condition := range agent.Status.Conditions { - if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == v1.ConditionTrue { +func isConditionTrue(cluster *clusterv1alpha1.Cluster, conditionType clusterv1alpha1.ClusterConditionType) bool { + for _, condition := range cluster.Status.Conditions { + if condition.Type == conditionType && condition.Status == v1.ConditionTrue { return true } } return false } -// addClusterCondition add condition -func (c *ClusterController) addClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) { +// updateClusterCondition updates condition in cluster conditions using giving condition +// adds condition if not existed +func (c *ClusterController) updateClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) { if cluster.Status.Conditions == nil { cluster.Status.Conditions = make([]clusterv1alpha1.ClusterCondition, 0) } newConditions := make([]clusterv1alpha1.ClusterCondition, 0) + needToUpdate := true for _, cond := range cluster.Status.Conditions { if cond.Type == condition.Type { - continue + if cond.Status == condition.Status { + needToUpdate = false + continue + } else { + newConditions = append(newConditions, cond) + } } newConditions = append(newConditions, cond) } - newConditions = append(newConditions, condition) - cluster.Status.Conditions = newConditions + if needToUpdate { + newConditions = append(newConditions, condition) + cluster.Status.Conditions = newConditions + } +} + +func isHostCluster(cluster 
*clusterv1alpha1.Cluster) bool { + for k, v := range cluster.Annotations { + if k == clusterv1alpha1.IsHostCluster && v == "true" { + return true + } + } + + return false +} + +// joinFederation joins a cluster into federation clusters. +// return nil error if kubefed cluster already exists. +func (c *ClusterController) joinFederation(clusterConfig *rest.Config, joiningClusterName string, labels map[string]string) (*fedv1b1.KubeFedCluster, error) { + + return joinClusterForNamespace(c.hostConfig, + clusterConfig, + kubefedNamespace, + kubefedNamespace, + hostClusterName, + joiningClusterName, + fmt.Sprintf("%s-secret", joiningClusterName), + labels, + apiextv1b1.ClusterScoped, + false, + false) +} + +// unJoinFederation unjoins a cluster from federation control plane. +func (c *ClusterController) unJoinFederation(clusterConfig *rest.Config, unjoiningClusterName string) error { + return unjoinCluster(c.hostConfig, + clusterConfig, + kubefedNamespace, + hostClusterName, + unjoiningClusterName, + true, + false) +} + +// allocatePort find a available port between [portRangeMin, portRangeMax] in maximumRetries +// TODO: only works with handful clusters +func (c *ClusterController) allocatePort() (uint16, error) { + rand.Seed(time.Now().UnixNano()) + + clusters, err := c.clusterLister.List(labels.Everything()) + if err != nil { + return 0, err + } + + const maximumRetries = 10 + for i := 0; i < maximumRetries; i++ { + collision := false + port := uint16(portRangeMin + rand.Intn(portRangeMax-portRangeMin+1)) + + for _, item := range clusters { + if item.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy && + item.Spec.Connection.KubernetesAPIServerPort != 0 && + item.Spec.Connection.KubeSphereAPIServerPort == port { + collision = true + break + } + } + + if !collision { + return port, nil + } + } + + return 0, fmt.Errorf("unable to allocate port after %d retries", maximumRetries) +} + +// generateToken returns a random 32-byte string as token +func (c 
*ClusterController) generateToken() string { + rand.Seed(time.Now().UnixNano()) + b := make([]byte, 32) + rand.Read(b) + return fmt.Sprintf("%x", b) } diff --git a/pkg/controller/cluster/helper.go b/pkg/controller/cluster/helper.go new file mode 100644 index 000000000..916b1b53b --- /dev/null +++ b/pkg/controller/cluster/helper.go @@ -0,0 +1 @@ +package cluster diff --git a/pkg/controller/cluster/join.go b/pkg/controller/cluster/join.go new file mode 100644 index 000000000..f14a106f0 --- /dev/null +++ b/pkg/controller/cluster/join.go @@ -0,0 +1,720 @@ +package cluster + +import ( + "context" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog" + "reflect" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" + "time" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" +) + +var ( + // Policy rules allowing full access to resources in the cluster + // or namespace. + namespacedPolicyRules = []rbacv1.PolicyRule{ + { + Verbs: []string{rbacv1.VerbAll}, + APIGroups: []string{rbacv1.APIGroupAll}, + Resources: []string{rbacv1.ResourceAll}, + }, + } + clusterPolicyRules = []rbacv1.PolicyRule{ + namespacedPolicyRules[0], + { + NonResourceURLs: []string{rbacv1.NonResourceAll}, + Verbs: []string{"get"}, + }, + } +) + +const ( + tokenKey = "token" + serviceAccountSecretTimeout = 30 * time.Second +) + +// joinClusterForNamespace registers a cluster with a KubeFed control +// plane. The KubeFed namespace in the joining cluster is provided by +// the joiningNamespace parameter. 
+func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace, + joiningNamespace, hostClusterName, joiningClusterName, secretName string, labels map[string]string, + scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) { + + hostClientset, err := HostClientset(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get host cluster clientset: %v", err) + return nil, err + } + + clusterClientset, err := ClusterClientset(clusterConfig) + if err != nil { + klog.V(2).Infof("Failed to get joining cluster clientset: %v", err) + return nil, err + } + + client, err := genericclient.New(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get kubefed clientset: %v", err) + return nil, err + } + + klog.V(2).Infof("Performing preflight checks.") + err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting) + if err != nil { + return nil, err + } + + klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace) + _, err = createKubeFedNamespace(clusterClientset, joiningNamespace, joiningClusterName, dryRun) + if err != nil { + klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", joiningNamespace, err) + return nil, err + } + klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace) + + saName, err := createAuthorizedServiceAccount(clusterClientset, joiningNamespace, joiningClusterName, hostClusterName, scope, dryRun, errorOnExisting) + if err != nil { + return nil, err + } + + secret, _, err := populateSecretInHostCluster(clusterClientset, hostClientset, + saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun) + if err != nil { + klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err) + return nil, err + } + + var disabledTLSValidations []fedv1b1.TLSValidation + if clusterConfig.TLSClientConfig.Insecure { + 
disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll) + } + + kubefedCluster, err := createKubeFedCluster(client, joiningClusterName, clusterConfig.Host, + secret.Name, kubefedNamespace, clusterConfig.CAData, disabledTLSValidations, labels, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Failed to create federated cluster resource: %v", err) + return nil, err + } + + klog.V(2).Info("Created federated cluster resource") + return kubefedCluster, nil +} + +// performPreflightChecks checks that the host and joining clusters are in +// a consistent state. +func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName, + kubefedNamespace string, errorOnExisting bool) error { + // Make sure there is no existing service account in the joining cluster. + saName := util.ClusterServiceAccountName(name, hostClusterName) + _, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(saName, metav1.GetOptions{}) + + switch { + case apierrors.IsNotFound(err): + return nil + case err != nil: + return err + case errorOnExisting: + return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name) + default: + klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name) + return nil + } +} + +// createKubeFedCluster creates a federated cluster resource that associates +// the cluster and secret. 
+func createKubeFedCluster(client genericclient.Client, joiningClusterName, apiEndpoint, + secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation, + labels map[string]string, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) { + fedCluster := &fedv1b1.KubeFedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kubefedNamespace, + Name: joiningClusterName, + Labels: labels, + }, + Spec: fedv1b1.KubeFedClusterSpec{ + APIEndpoint: apiEndpoint, + CABundle: caBundle, + SecretRef: fedv1b1.LocalSecretReference{ + Name: secretName, + }, + DisabledTLSValidations: disabledTLSValidations, + }, + } + + if dryRun { + return fedCluster, nil + } + + existingFedCluster := &fedv1b1.KubeFedCluster{} + err := client.Get(context.TODO(), existingFedCluster, kubefedNamespace, joiningClusterName) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err) + return nil, err + case err == nil && errorOnExisting: + return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName) + case err == nil: + existingFedCluster.Spec = fedCluster.Spec + existingFedCluster.Labels = labels + err = client.Update(context.TODO(), existingFedCluster) + if err != nil { + klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, err) + return nil, err + } + return existingFedCluster, nil + default: + err = client.Create(context.TODO(), fedCluster) + if err != nil { + klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err) + return nil, err + } + return fedCluster, nil + } +} + +// createKubeFedNamespace creates the kubefed namespace in the cluster +// associated with clusterClientset, if it doesn't already exist. 
+func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace, + joiningClusterName string, dryRun bool) (*corev1.Namespace, error) { + fedNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubefedNamespace, + }, + } + + if dryRun { + return fedNamespace, nil + } + + _, err := clusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, err) + return nil, err + } + + if err == nil { + klog.V(2).Infof("Already existing %s namespace", kubefedNamespace) + return fedNamespace, nil + } + + // Not found, so create. + _, err = clusterClientset.CoreV1().Namespaces().Create(fedNamespace) + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, err) + return nil, err + } + return fedNamespace, nil +} + +// createAuthorizedServiceAccount creates a service account and grants +// the privileges required by the KubeFed control plane to manage +// resources in the joining cluster. The name of the created service +// account is returned on success. 
+func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface, + namespace, joiningClusterName, hostClusterName string, + scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (string, error) { + + klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName) + + saName, err := createServiceAccount(joiningClusterClientset, namespace, + joiningClusterName, hostClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName) + + if scope == apiextv1b1.NamespaceScoped { + klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + + klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, + dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + + } else { + klog.V(2).Infof("Creating cluster 
role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + } + + return saName, nil +} + +// createServiceAccount creates a service account in the cluster associated +// with clusterClientset with credentials that will be used by the host cluster +// to access its API server. +func createServiceAccount(clusterClientset kubeclient.Interface, namespace, + joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) { + saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName) + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: saName, + Namespace: namespace, + }, + } + + if dryRun { + return saName, nil + } + + // Create a new service account. 
+ _, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Create(sa) + switch { + case apierrors.IsAlreadyExists(err) && errorOnExisting: + klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName) + return "", err + case err != nil && !apierrors.IsAlreadyExists(err): + klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err) + return "", err + default: + return saName, nil + } +} + +func bindingSubjects(saName, namespace string) []rbacv1.Subject { + return []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: saName, + Namespace: namespace, + }, + } +} + +// createClusterRoleAndBinding creates an RBAC cluster role and +// binding that allows the service account identified by saName to +// access all resources in all namespaces in the cluster associated +// with clientset. +func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: clusterPolicyRules, + } + existingRole, err := clientset.RbacV1().ClusterRoles().Get(roleName, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err := clientset.RbacV1().ClusterRoles().Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) 
+ return err + } + default: // role was not found + _, err := clientset.RbacV1().ClusterRoles().Create(role) + if err != nil { + klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + + // TODO: This should limit its access to only necessary resources. + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + } + existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err := 
clientset.RbacV1().ClusterRoleBindings().Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + return nil +} + +// createRoleAndBinding creates an RBAC role and binding +// that allows the service account identified by saName to access all +// resources in the specified namespace. +func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: namespacedPolicyRules, + } + existingRole, err := clientset.RbacV1().Roles(namespace).Get(roleName, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err) + return err + case errorOnExisting && err == nil: + return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err = clientset.RbacV1().Roles(namespace).Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + default: + _, err := clientset.RbacV1().Roles(namespace).Create(role) + if err != nil { + klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + + 
binding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: roleName, + }, + } + + existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().RoleBindings(namespace).Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding) + if err != nil { + klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err = clientset.RbacV1().RoleBindings(namespace).Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding) + if err != nil { + klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s 
due to: %v", + saName, clusterName, err) + return err + } + } + + return nil +} + +// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and +// binding that allows the service account identified by saName to +// access the health check path of the cluster. +func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.HealthCheckRoleName(saName, namespace) + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"Get"}, + NonResourceURLs: []string{"/healthz"}, + }, + // The cluster client expects to be able to list nodes to retrieve zone and region details. + // TODO(marun) Consider making zone/region retrieval optional + { + Verbs: []string{"list"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + } + existingRole, err := clientset.RbacV1().ClusterRoles().Get(role.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err := clientset.RbacV1().ClusterRoles().Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + default: // role was not found + _, err := clientset.RbacV1().ClusterRoles().Create(role) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v", + 
saName, clusterName, err) + return err + } + } + + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + } + existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = 
clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + return nil +} + +// populateSecretInHostCluster copies the service account secret for saName +// from the cluster referenced by clusterClientset to the client referenced by +// hostClientset, putting it in a secret named secretName in the provided +// namespace. +func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface, + saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string, + dryRun bool) (*corev1.Secret, []byte, error) { + + klog.V(2).Infof("Creating cluster credentials secret in host cluster") + + if dryRun { + dryRunSecret := &corev1.Secret{} + dryRunSecret.Name = secretName + return dryRunSecret, nil, nil + } + + // Get the secret from the joining cluster. + var secret *corev1.Secret + err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) { + sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(saName, + metav1.GetOptions{}) + if err != nil { + return false, nil + } + + for _, objReference := range sa.Secrets { + saSecretName := objReference.Name + var err error + secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(saSecretName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + if secret.Type == corev1.SecretTypeServiceAccountToken { + klog.V(2).Infof("Using secret named: %s", secret.Name) + return true, nil + } + } + return false, nil + }) + + if err != nil { + klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err) + return nil, nil, err + } + + token, ok := secret.Data[tokenKey] + if !ok { + return nil, nil, errors.Errorf("Key %q not found in service account secret", tokenKey) + } + + // Create a secret in the host cluster 
containing the token. + v1Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: hostNamespace, + }, + Data: map[string][]byte{ + tokenKey: token, + }, + } + + if secretName == "" { + v1Secret.GenerateName = joiningClusterName + "-" + } else { + v1Secret.Name = secretName + } + + var v1SecretResult *corev1.Secret + _, err = hostClientset.CoreV1().Secrets(hostNamespace).Get(v1Secret.Name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Create(&v1Secret) + if err != nil { + klog.V(2).Infof("Could not create secret in host cluster: %v", err) + return nil, nil, err + } + return v1SecretResult, nil, nil + } + klog.V(2).Infof("Could not get secret %s in host cluster: %v", v1Secret.Name, err) + return nil, nil, err + } else { + v1SecretResult, err = hostClientset.CoreV1().Secrets(hostNamespace).Update(&v1Secret) + if err != nil { + klog.V(2).Infof("Update secret %s in host cluster failed: %v", v1Secret.Name, err) + return nil, nil, err + } + } + + // caBundle is optional so no error is suggested if it is not + // found in the secret. 
+	caBundle := secret.Data["ca.crt"] + +	klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name) +	return v1SecretResult, caBundle, nil +} diff --git a/pkg/controller/cluster/unjoin.go b/pkg/controller/cluster/unjoin.go new file mode 100644 index 000000000..d89727322 --- /dev/null +++ b/pkg/controller/cluster/unjoin.go @@ -0,0 +1,296 @@ +package cluster + +import ( +	"context" +	"github.com/pkg/errors" +	apierrors "k8s.io/apimachinery/pkg/api/errors" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +	kubeclient "k8s.io/client-go/kubernetes" +	"k8s.io/client-go/rest" +	"k8s.io/klog" +	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" +	genericclient "sigs.k8s.io/kubefed/pkg/client/generic" +	"sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +// Following code copied from sigs.k8s.io/kubefed to avoid import collision + +// unjoinCluster performs all the necessary steps to remove the +// registration of a cluster from a KubeFed control plane provided the +// required set of parameters are passed in. 
+func unjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, unjoiningClusterName string, forceDeletion, dryRun bool) error { + +	hostClientset, err := util.HostClientset(hostConfig) +	if err != nil { +		klog.V(2).Infof("Failed to get host cluster clientset: %v", err) +		return err +	} + +	var clusterClientset *kubeclient.Clientset +	if clusterConfig != nil { +		clusterClientset, err = util.ClusterClientset(clusterConfig) +		if err != nil { +			klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err) +			if !forceDeletion { +				return err +			} +		} +	} + +	client, err := genericclient.New(hostConfig) +	if err != nil { +		klog.V(2).Infof("Failed to get kubefed clientset: %v", err) +		return err +	} + +	if clusterClientset != nil { +		err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun) +		if err != nil { +			if !forceDeletion { +				return err +			} +			klog.V(2).Infof("Failed to delete RBAC resources: %v", err) +		} + +		err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun) +		if err != nil { +			if !forceDeletion { +				return err +			} +			klog.V(2).Infof("Failed to delete kubefed namespace: %v", err) +		} +	} + +	// deletionSucceeded when all operations in deleteRBACResources and deleteFedNSFromUnjoinCluster succeed. +	err = deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun) +	if err != nil { +		return err +	} +	return nil +} + +// deleteFederatedClusterAndSecret deletes a federated cluster resource that associates +// the cluster and secret. 
+func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client, + kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error { + if dryRun { + return nil + } + + klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q", + kubefedNamespace, unjoiningClusterName) + + fedCluster := &fedv1b1.KubeFedCluster{} + err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName) + } + + err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(fedCluster.Spec.SecretRef.Name, + &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q", + kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName) + } + + err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", 
fedCluster.Namespace, fedCluster.Name, unjoiningClusterName) + } + + return nil +} + +// deleteRBACResources deletes the cluster role, cluster rolebindings and service account +// from the unjoining cluster. +func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface, + namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error { + + saName := ClusterServiceAccountName(unjoiningClusterName, hostClusterName) + + err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun) + if err != nil { + return err + } + + err = deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun) + if err != nil { + return err + } + + return nil +} + +// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from +// the unjoining cluster so long as the unjoining cluster is not the +// host cluster. +func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface, + kubefedNamespace, unjoiningClusterName string, dryRun bool) error { + + if dryRun { + return nil + } + + hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace) + } + + unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName) + } + + if IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) { + klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace) + return nil + } + + klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + err = 
unjoiningClusterClientset.CoreV1().Namespaces().Delete(kubefedNamespace, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + return nil + } else if err != nil { + return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName) + } else { + klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + } + + return nil +} + +// deleteServiceAccount deletes a service account in the cluster associated +// with clusterClientset with credentials that are used by the host cluster +// to access its API server. +func deleteServiceAccount(clusterClientset kubeclient.Interface, saName, + namespace, unjoiningClusterName string, dryRun bool) error { + if dryRun { + return nil + } + + klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName) + + // Delete a service account. + err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(saName, + &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName) + } else if err != nil { + return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName) + } else { + klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName) + } + + return nil +} + +// deleteClusterRoleAndBinding deletes an RBAC cluster role and binding that +// allows the service account identified by saName to access all resources in +// all namespaces in the cluster associated with clusterClientset. 
+func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface, + saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + healthCheckRoleName := util.HealthCheckRoleName(saName, namespace) + + // Attempt to delete all role and role bindings created by join + for _, name := range []string{roleName, healthCheckRoleName} { + klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + + err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q", + name, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } + + klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + err = clusterClientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q", + name, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted cluster role %q for service account 
%q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } + } + + klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(roleName, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q", + namespace, roleName, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } + + klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + err = clusterClientset.RbacV1().Roles(namespace).Delete(roleName, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q", + namespace, roleName, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleting Role \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } + + return nil +} diff --git a/pkg/controller/cluster/util.go b/pkg/controller/cluster/util.go new file mode 100644 index 000000000..27c71309c --- /dev/null 
+++ b/pkg/controller/cluster/util.go @@ -0,0 +1,166 @@ +package cluster + +import ( +	"fmt" +	"github.com/pkg/errors" +	"k8s.io/apimachinery/pkg/api/meta" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +	pkgruntime "k8s.io/apimachinery/pkg/runtime" +	kubeclient "k8s.io/client-go/kubernetes" +	"k8s.io/client-go/rest" +	"k8s.io/client-go/tools/clientcmd" +	"strings" +) + +// Default values for the federated group and version used by +// the enable and disable subcommands of `kubefedctl`. +const ( +	DefaultFederatedGroup = "types.kubefed.io" +	DefaultFederatedVersion = "v1beta1" + +	FederatedKindPrefix = "Federated" +) + +// FedConfig provides a rest config based on the filesystem kubeconfig (via +// pathOptions) and context in order to talk to the host kubernetes cluster +// and the joining kubernetes cluster. +type FedConfig interface { +	HostConfig(context, kubeconfigPath string) (*rest.Config, error) +	ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) +	GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig +} + +// fedConfig implements the FedConfig interface. +type fedConfig struct { +	pathOptions *clientcmd.PathOptions +} + +// NewFedConfig creates a fedConfig for `kubefedctl` commands. +func NewFedConfig(pathOptions *clientcmd.PathOptions) FedConfig { +	return &fedConfig{ +		pathOptions: pathOptions, +	} +} + +// HostConfig provides a rest config to talk to the host kubernetes cluster +// based on the context and kubeconfig passed in. +func (a *fedConfig) HostConfig(context, kubeconfigPath string) (*rest.Config, error) { +	hostConfig := a.GetClientConfig(context, kubeconfigPath) +	hostClientConfig, err := hostConfig.ClientConfig() +	if err != nil { +		return nil, err +	} + +	return hostClientConfig, nil +} + +// ClusterConfig provides a rest config to talk to the joining kubernetes +// cluster based on the context and kubeconfig passed in. 
+func (a *fedConfig) ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) { +	clusterConfig := a.GetClientConfig(context, kubeconfigPath) +	clusterClientConfig, err := clusterConfig.ClientConfig() +	if err != nil { +		return nil, err +	} + +	return clusterClientConfig, nil +} + +// GetClientConfig is a helper method to create a client config from the +// context and kubeconfig passed as arguments. +func (a *fedConfig) GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig { +	loadingRules := *a.pathOptions.LoadingRules +	loadingRules.Precedence = a.pathOptions.GetLoadingPrecedence() +	loadingRules.ExplicitPath = kubeconfigPath +	overrides := &clientcmd.ConfigOverrides{ +		CurrentContext: context, +	} + +	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, overrides) +} + +// HostClientset provides a kubernetes API compliant clientset to +// communicate with the host cluster's kubernetes API server. +func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) { +	return kubeclient.NewForConfig(config) +} + +// ClusterClientset provides a kubernetes API compliant clientset to +// communicate with the joining cluster's kubernetes API server. +func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) { +	return kubeclient.NewForConfig(config) +} + +// ClusterServiceAccountName returns the name of a service account whose +// credentials are used by the host cluster to access the client cluster. +func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string { +	return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName) +} + +// RoleName returns the name of a Role or ClusterRole and its +// associated RoleBinding or ClusterRoleBinding that are used to allow +// the service account to access necessary resources on the cluster. 
+func RoleName(serviceAccountName string) string { + return fmt.Sprintf("kubefed-controller-manager:%s", serviceAccountName) +} + +// HealthCheckRoleName returns the name of a ClusterRole and its +// associated ClusterRoleBinding that is used to allow the service +// account to check the health of the cluster and list nodes. +func HealthCheckRoleName(serviceAccountName, namespace string) string { + return fmt.Sprintf("kubefed-controller-manager:%s:healthcheck-%s", namespace, serviceAccountName) +} + +// IsFederatedAPIResource checks if a resource with the given Kind and group is a Federated one +func IsFederatedAPIResource(kind, group string) bool { + return strings.HasPrefix(kind, FederatedKindPrefix) && group == DefaultFederatedGroup +} + +// GetNamespace returns namespace of the current context +func GetNamespace(hostClusterContext string, kubeconfig string, config FedConfig) (string, error) { + clientConfig := config.GetClientConfig(hostClusterContext, kubeconfig) + currentContext, err := CurrentContext(clientConfig) + if err != nil { + return "", err + } + + ns, _, err := clientConfig.Namespace() + if err != nil { + return "", errors.Wrapf(err, "Failed to get ClientConfig for host cluster context %q and kubeconfig %q", + currentContext, kubeconfig) + } + + if len(ns) == 0 { + ns = "default" + } + return ns, nil +} + +// CurrentContext retrieves the current context from the provided config. +func CurrentContext(config clientcmd.ClientConfig) (string, error) { + rawConfig, err := config.RawConfig() + if err != nil { + return "", errors.Wrap(err, "Failed to get current context from config") + } + return rawConfig.CurrentContext, nil +} + +// IsPrimaryCluster checks if the caller is working with objects for the +// primary cluster by checking if the UIDs match for both ObjectMetas passed +// in. +// TODO (font): Need to revisit this when cluster ID is available. 
+func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool { +	meta := MetaAccessor(obj) +	clusterMeta := MetaAccessor(clusterObj) +	return meta.GetUID() == clusterMeta.GetUID() +} + +func MetaAccessor(obj pkgruntime.Object) metav1.Object { +	accessor, err := meta.Accessor(obj) +	if err != nil { +		// This should always succeed if obj is not nil. Also, +		// adapters are slated for replacement by unstructured. +		return nil +	} +	return accessor +} diff --git a/pkg/simple/client/multicluster/options.go b/pkg/simple/client/multicluster/options.go new file mode 100644 index 000000000..9d09febbc --- /dev/null +++ b/pkg/simple/client/multicluster/options.go @@ -0,0 +1,26 @@ +package multicluster + +import "github.com/spf13/pflag" + +type Options struct { +	// Enable +	Enable bool `json:"enable"` +	EnableFederation bool `json:"enableFederation,omitempty"` +} + +// NewOptions returns a non-nil Options with all features disabled by default +func NewOptions() *Options { +	return &Options{ +		Enable: false, +		EnableFederation: false, +	} +} + +func (o *Options) Validate() []error { +	return nil +} + +func (o *Options) AddFlags(fs *pflag.FlagSet, s *Options) { +	fs.BoolVar(&o.Enable, "multiple-clusters", s.Enable, ""+ +		"This field instructs KubeSphere to enter multiple-cluster mode or not.") +} diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/k8s.io/kubectl/LICENSE @@ -0,0 +1,201 @@ +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. + +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS new file mode 100644 index 000000000..99dabed08 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +reviewers: +- apelisse diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go new file mode 100644 index 000000000..08194d580 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package openapi is a collection of libraries for fetching the openapi spec +// from a Kubernetes server and then indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata +// such as the patchStrategy and patchMergeKey for creating patches. +package openapi // k8s.io/kubectl/pkg/util/openapi diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/dryrun.go b/vendor/k8s.io/kubectl/pkg/util/openapi/dryrun.go new file mode 100644 index 000000000..33cf9e9e5 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/dryrun.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "errors" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func hasGVKExtension(extensions []*openapi_v2.NamedAny, gvk schema.GroupVersionKind) bool { + for _, extension := range extensions { + if extension.GetValue().GetYaml() == "" || + extension.GetName() != "x-kubernetes-group-version-kind" { + continue + } + var value map[string]string + err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version { + return true + } + return false + } + return false +} + +// SupportsDryRun is a method that let's us look in the OpenAPI if the +// specific group-version-kind supports the dryRun query parameter for +// the PATCH end-point. +func SupportsDryRun(doc *openapi_v2.Document, gvk schema.GroupVersionKind) (bool, error) { + for _, path := range doc.GetPaths().GetPath() { + // Is this describing the gvk we're looking for? + if !hasGVKExtension(path.GetValue().GetPatch().GetVendorExtension(), gvk) { + continue + } + for _, param := range path.GetValue().GetPatch().GetParameters() { + if param.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == "dryRun" { + return true, nil + } + } + return false, nil + } + + return false, errors.New("couldn't find GVK in openapi") +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go b/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go new file mode 100644 index 000000000..f1b5cdd4f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import "github.com/go-openapi/spec" + +// PrintColumnsKey is the key that defines which columns should be printed +const PrintColumnsKey = "x-kubernetes-print-columns" + +// GetPrintColumns looks for the open API extension for the display columns. +func GetPrintColumns(extensions spec.Extensions) (string, bool) { + return extensions.GetString(PrintColumnsKey) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go new file mode 100644 index 000000000..c8f370b99 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +// Resources interface describe a resources provider, that can give you +// resource based on group-version-kind. 
+type Resources interface { + LookupResource(gvk schema.GroupVersionKind) proto.Schema +} + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// document is an implementation of `Resources`. It looks for +// resources in an openapi Schema. +type document struct { + // Maps gvk to model name + resources map[schema.GroupVersionKind]string + models proto.Models +} + +var _ Resources = &document{} + +// NewOpenAPIData creates a new `Resources` out of the openapi document +func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) { + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + resources := map[schema.GroupVersionKind]string{} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic("ListModels returns a model that can't be looked-up.") + } + gvkList := parseGroupVersionKind(model) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + resources[gvk] = modelName + } + } + } + + return &document{ + resources: resources, + models: models, + }, nil +} + +func (d *document) LookupResource(gvk schema.GroupVersionKind) proto.Schema { + modelName, found := d.resources[gvk] + if !found { + return nil + } + return d.models.LookupModel(modelName) +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. 
+ gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go new file mode 100644 index 000000000..d5c9476a0 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "sync" + + "k8s.io/client-go/discovery" +) + +// synchronizedOpenAPIGetter fetches the openapi schema once and then caches it in memory +type synchronizedOpenAPIGetter struct { + // Cached results + sync.Once + openAPISchema Resources + err error + + openAPIClient discovery.OpenAPISchemaInterface +} + +var _ Getter = &synchronizedOpenAPIGetter{} + +// Getter is an interface for fetching openapi specs and parsing them into an Resources struct +type Getter interface { + // OpenAPIData returns the parsed OpenAPIData + Get() (Resources, error) +} + +// NewOpenAPIGetter returns an object to return OpenAPIDatas which reads +// from a server, and then stores in memory for subsequent invocations +func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) Getter { + return &synchronizedOpenAPIGetter{ + openAPIClient: openAPIClient, + } +} + +// Resources implements Getter +func (g *synchronizedOpenAPIGetter) Get() (Resources, error) { + g.Do(func() { + s, err := g.openAPIClient.OpenAPISchema() + if err != nil { + g.err = err + return + } + + g.openAPISchema, g.err = NewOpenAPIData(s) + }) + + // Return the save result + return g.openAPISchema, g.err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index deffc100c..376b710ac 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,9 +7,9 @@ github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest/autorest v0.9.0 => github.com/Azure/go-autorest/autorest v0.9.0 github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/azure # github.com/Azure/go-autorest/autorest/adal v0.5.0 => github.com/Azure/go-autorest/autorest/adal v0.5.0 github.com/Azure/go-autorest/autorest/adal +github.com/Azure/go-autorest/autorest/azure # github.com/Azure/go-autorest/autorest/date v0.1.0 => github.com/Azure/go-autorest/autorest/date v0.1.0 github.com/Azure/go-autorest/autorest/date # github.com/Azure/go-autorest/logger v0.1.0 => 
github.com/Azure/go-autorest/logger v0.1.0 @@ -166,9 +166,9 @@ github.com/elastic/go-elasticsearch/v7/estransport github.com/elastic/go-elasticsearch/v7/internal/version # github.com/emicklei/go-restful v2.9.5+incompatible => github.com/emicklei/go-restful v2.9.5+incompatible github.com/emicklei/go-restful -github.com/emicklei/go-restful/log # github.com/emicklei/go-restful-openapi v1.0.0 => github.com/emicklei/go-restful-openapi v1.0.0 github.com/emicklei/go-restful-openapi +github.com/emicklei/go-restful/log # github.com/emirpasic/gods v1.12.0 => github.com/emirpasic/gods v1.12.0 github.com/emirpasic/gods/containers github.com/emirpasic/gods/lists @@ -345,7 +345,7 @@ github.com/inconshreveable/mousetrap github.com/jbenet/go-context/io # github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af => github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af github.com/jmespath/go-jmespath -# github.com/json-iterator/go v1.1.8 => github.com/json-iterator/go v1.1.8 +# github.com/json-iterator/go v1.1.9 => github.com/json-iterator/go v1.1.8 github.com/json-iterator/go # github.com/kelseyhightower/envconfig v1.4.0 => github.com/kelseyhightower/envconfig v1.4.0 github.com/kelseyhightower/envconfig @@ -421,7 +421,7 @@ github.com/modern-go/reflect2 github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f => github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo v1.8.0 => github.com/onsi/ginkgo v1.8.0 +# github.com/onsi/ginkgo v1.12.0 => github.com/onsi/ginkgo v1.8.0 github.com/onsi/ginkgo github.com/onsi/ginkgo/config github.com/onsi/ginkgo/extensions/table @@ -441,7 +441,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.5.0 => github.com/onsi/gomega v1.5.0 +# 
github.com/onsi/gomega v1.9.0 => github.com/onsi/gomega v1.5.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gbytes @@ -510,7 +510,7 @@ github.com/pborman/uuid github.com/pelletier/go-buffruneio # github.com/pelletier/go-toml v1.2.0 => github.com/pelletier/go-toml v1.2.0 github.com/pelletier/go-toml -# github.com/pkg/errors v0.8.1 => github.com/pkg/errors v0.8.1 +# github.com/pkg/errors v0.9.1 => github.com/pkg/errors v0.8.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 => github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib @@ -549,7 +549,7 @@ github.com/projectcalico/libcalico-go/lib/selector/tokenizer github.com/projectcalico/libcalico-go/lib/set github.com/projectcalico/libcalico-go/lib/validator/v3 github.com/projectcalico/libcalico-go/lib/watch -# github.com/prometheus/client_golang v0.9.4 => github.com/prometheus/client_golang v0.9.4 +# github.com/prometheus/client_golang v1.0.0 => github.com/prometheus/client_golang v0.9.4 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus @@ -847,7 +847,7 @@ gopkg.in/src-d/go-git.v4/utils/merkletrie/noder gopkg.in/tomb.v1 # gopkg.in/warnings.v0 v0.1.2 => gopkg.in/warnings.v0 v0.1.2 gopkg.in/warnings.v0 -# gopkg.in/yaml.v2 v2.2.4 => gopkg.in/yaml.v2 v2.2.4 +# gopkg.in/yaml.v2 v2.2.8 => gopkg.in/yaml.v2 v2.2.4 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 => gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 gopkg.in/yaml.v3 @@ -899,7 +899,7 @@ istio.io/client-go/pkg/listers/security/v1beta1 # istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a => istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a istio.io/gogo-genproto/googleapis/google/api istio.io/gogo-genproto/googleapis/google/rpc -# k8s.io/api v0.0.0-20191114100352-16d7abae0d2a => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a +# k8s.io/api v0.17.3 => k8s.io/api 
v0.0.0-20191114100352-16d7abae0d2a k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -940,7 +940,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 => k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 +# k8s.io/apiextensions-apiserver v0.17.3 => k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 @@ -948,7 +948,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb +# k8s.io/apimachinery v0.17.3 => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1004,7 +1004,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 => k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 +# k8s.io/apiserver v0.17.3 => k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -1108,7 +1108,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook 
k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 +# k8s.io/client-go v0.17.3 => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 k8s.io/client-go/discovery k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic @@ -1319,7 +1319,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 +# k8s.io/code-generator v0.17.3 => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators @@ -1338,7 +1338,7 @@ k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util -# k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 => k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 +# k8s.io/component-base v0.17.3 => k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 k8s.io/component-base/cli/flag k8s.io/component-base/featuregate k8s.io/component-base/logs @@ -1368,6 +1368,8 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets +# k8s.io/kubectl v0.17.3 => k8s.io/kubectl v0.17.3 +k8s.io/kubectl/pkg/util/openapi # k8s.io/utils v0.0.0-20191114184206-e782cd3c129f => k8s.io/utils v0.0.0-20191114184206-e782cd3c129f k8s.io/utils/buffer k8s.io/utils/integer @@ -1389,7 +1391,7 @@ openpitrix.io/openpitrix/pkg/util/reflectutil openpitrix.io/openpitrix/pkg/util/stringutil openpitrix.io/openpitrix/pkg/util/yamlutil openpitrix.io/openpitrix/pkg/version -# sigs.k8s.io/controller-runtime v0.4.0 => sigs.k8s.io/controller-runtime v0.4.0 +# sigs.k8s.io/controller-runtime v0.5.0 => 
sigs.k8s.io/controller-runtime v0.4.0 sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal sigs.k8s.io/controller-runtime/pkg/client @@ -1439,6 +1441,24 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml sigs.k8s.io/controller-tools/pkg/version sigs.k8s.io/controller-tools/pkg/webhook +# sigs.k8s.io/kubefed v0.2.0-alpha.1 => sigs.k8s.io/kubefed v0.2.0-alpha.1 +sigs.k8s.io/kubefed/pkg/apis +sigs.k8s.io/kubefed/pkg/apis/core/common +sigs.k8s.io/kubefed/pkg/apis/core/typeconfig +sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1 +sigs.k8s.io/kubefed/pkg/apis/core/v1beta1 +sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1 +sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1 +sigs.k8s.io/kubefed/pkg/client/generic +sigs.k8s.io/kubefed/pkg/client/generic/scheme +sigs.k8s.io/kubefed/pkg/controller/util +sigs.k8s.io/kubefed/pkg/kubefedctl +sigs.k8s.io/kubefed/pkg/kubefedctl/enable +sigs.k8s.io/kubefed/pkg/kubefedctl/federate +sigs.k8s.io/kubefed/pkg/kubefedctl/options +sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning +sigs.k8s.io/kubefed/pkg/kubefedctl/util +sigs.k8s.io/kubefed/pkg/version # sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca => sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca sigs.k8s.io/structured-merge-diff/fieldpath sigs.k8s.io/structured-merge-diff/merge diff --git a/vendor/sigs.k8s.io/kubefed/LICENSE b/vendor/sigs.k8s.io/kubefed/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1alpha1.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1alpha1.go new file mode 100644 index 000000000..17dee4836 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1alpha1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1beta1.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1beta1.go new file mode 100644 index 000000000..d61d3d4df --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_core_v1beta1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_multiclusterdns_v1alpha1.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_multiclusterdns_v1alpha1.go new file mode 100644 index 000000000..e162dee8d --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_multiclusterdns_v1alpha1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +import ( + "sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_scheduling_v1alpha1.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_scheduling_v1alpha1.go new file mode 100644 index 000000000..21acab367 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/addtoscheme_scheduling_v1alpha1.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apis + +import ( + "sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/apis.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/apis.go new file mode 100644 index 000000000..d38c2e2d8 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/apis.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apis contains Kubernetes API groups. +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/constants.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/constants.go new file mode 100644 index 000000000..3f3c0a674 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/constants.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +type ClusterConditionType string + +// These are valid conditions of a cluster. +const ( + // ClusterReady means the cluster is ready to accept workloads. + ClusterReady ClusterConditionType = "Ready" + // ClusterOffline means the cluster is temporarily down or not reachable + ClusterOffline ClusterConditionType = "Offline" +) + +const ( + NamespaceName = "namespaces" +) diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/util.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/util.go new file mode 100644 index 000000000..30284c903 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/common/util.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "fmt" + "strings" +) + +func PropagatedVersionName(kind, resourceName string) string { + return fmt.Sprintf("%s%s", PropagatedVersionPrefix(kind), resourceName) +} + +func PropagatedVersionPrefix(kind string) string { + return fmt.Sprintf("%s-", strings.ToLower(kind)) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/interface.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/interface.go new file mode 100644 index 000000000..a5926272d --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/interface.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typeconfig + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Interface defines how to interact with a FederatedTypeConfig +type Interface interface { + GetObjectMeta() metav1.ObjectMeta + GetTargetType() metav1.APIResource + GetNamespaced() bool + GetPropagationEnabled() bool + GetFederatedType() metav1.APIResource + GetStatusType() *metav1.APIResource + GetStatusEnabled() bool + GetFederatedNamespaced() bool + IsNamespace() bool +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/util.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/util.go new file mode 100644 index 000000000..999d64540 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/typeconfig/util.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typeconfig + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GroupQualifiedName returns the plural name of the api resource +// optionally qualified by its group: +// +// '[.]' +// +// This is the naming scheme for FederatedTypeConfig resources. The +// scheme ensures that, for a given KubeFed control plane, +// federation of a target type will be configured by at most one +// FederatedTypeConfig. +func GroupQualifiedName(apiResource metav1.APIResource) string { + if len(apiResource.Group) == 0 { + return apiResource.Name + } + return fmt.Sprintf("%s.%s", apiResource.Name, apiResource.Group) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/clusterpropagatedversion_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/clusterpropagatedversion_types.go new file mode 100644 index 000000000..85b07ee12 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/clusterpropagatedversion_types.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterPropagatedVersionSpec defines the desired state of ClusterPropagatedVersion +type ClusterPropagatedVersionSpec struct { +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterpropagatedversions,scope=Cluster +// +kubebuilder:subresource:status + +// ClusterPropagatedVersion holds version information about the state +// propagated from KubeFed APIs (configured by FederatedTypeConfig +// resources) to member clusters. The name of a ClusterPropagatedVersion +// encodes the kind and name of the resource it stores information for +// (i.e. -). If a target resource has +// a populated metadata.Generation field, the generation will be +// stored with a prefix of `gen:` as the version for the cluster. If +// metadata.Generation is not available, metadata.ResourceVersion will +// be stored with a prefix of `rv:` as the version for the cluster. 
+type ClusterPropagatedVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Status PropagatedVersionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterPropagatedVersionList contains a list of ClusterPropagatedVersion +type ClusterPropagatedVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterPropagatedVersion `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ClusterPropagatedVersion{}, &ClusterPropagatedVersionList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/federatedservicestatus_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/federatedservicestatus_types.go new file mode 100644 index 000000000..983077760 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/federatedservicestatus_types.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FederatedServiceClusterStatus is the observed status of the resource for a named cluster +type FederatedServiceClusterStatus struct { + ClusterName string `json:"clusterName"` + Status corev1.ServiceStatus `json:"status"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=federatedservicestatuses + +type FederatedServiceStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + ClusterStatus []FederatedServiceClusterStatus `json:"clusterStatus,omitempty"` +} + +// +kubebuilder:object:root=true + +// FederatedServiceStatusList contains a list of FederatedServiceStatus +type FederatedServiceStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FederatedServiceStatus `json:"items"` +} + +func init() { + SchemeBuilder.Register(&FederatedServiceStatus{}, &FederatedServiceStatusList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/groupversion_info.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..bdf4cde62 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/groupversion_info.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the core v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=core.kubefed.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "core.kubefed.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/propagatedversion_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/propagatedversion_types.go new file mode 100644 index 000000000..a4764fc84 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/propagatedversion_types.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PropagatedVersionSpec defines the desired state of PropagatedVersion +type PropagatedVersionSpec struct { +} + +// PropagatedVersionStatus defines the observed state of PropagatedVersion +type PropagatedVersionStatus struct { + // The observed version of the template for this resource. + TemplateVersion string `json:"templateVersion"` + // The observed version of the overrides for this resource. + OverrideVersion string `json:"overridesVersion"` + // The last versions produced in each cluster for this resource. + // +optional + ClusterVersions []ClusterObjectVersion `json:"clusterVersions,omitempty"` +} + +type ClusterObjectVersion struct { + // The name of the cluster the version is for. + ClusterName string `json:"clusterName"` + // The last version produced for the resource by a KubeFed + // operation. + Version string `json:"version"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=propagatedversions +// +kubebuilder:subresource:status + +// PropagatedVersion holds version information about the state +// propagated from KubeFed APIs (configured by FederatedTypeConfig +// resources) to member clusters. The name of a PropagatedVersion +// encodes the kind and name of the resource it stores information for +// (i.e. -). If a target resource has +// a populated metadata.Generation field, the generation will be +// stored with a prefix of `gen:` as the version for the cluster. If +// metadata.Generation is not available, metadata.ResourceVersion will +// be stored with a prefix of `rv:` as the version for the cluster. 
+type PropagatedVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Status PropagatedVersionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PropagatedVersionList contains a list of PropagatedVersion +type PropagatedVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PropagatedVersion `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PropagatedVersion{}, &PropagatedVersionList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..36cb36cd2 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,286 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterObjectVersion) DeepCopyInto(out *ClusterObjectVersion) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObjectVersion. 
+func (in *ClusterObjectVersion) DeepCopy() *ClusterObjectVersion { + if in == nil { + return nil + } + out := new(ClusterObjectVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPropagatedVersion) DeepCopyInto(out *ClusterPropagatedVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropagatedVersion. +func (in *ClusterPropagatedVersion) DeepCopy() *ClusterPropagatedVersion { + if in == nil { + return nil + } + out := new(ClusterPropagatedVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterPropagatedVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPropagatedVersionList) DeepCopyInto(out *ClusterPropagatedVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterPropagatedVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropagatedVersionList. +func (in *ClusterPropagatedVersionList) DeepCopy() *ClusterPropagatedVersionList { + if in == nil { + return nil + } + out := new(ClusterPropagatedVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterPropagatedVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPropagatedVersionSpec) DeepCopyInto(out *ClusterPropagatedVersionSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropagatedVersionSpec. +func (in *ClusterPropagatedVersionSpec) DeepCopy() *ClusterPropagatedVersionSpec { + if in == nil { + return nil + } + out := new(ClusterPropagatedVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedServiceClusterStatus) DeepCopyInto(out *FederatedServiceClusterStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedServiceClusterStatus. +func (in *FederatedServiceClusterStatus) DeepCopy() *FederatedServiceClusterStatus { + if in == nil { + return nil + } + out := new(FederatedServiceClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedServiceStatus) DeepCopyInto(out *FederatedServiceStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ClusterStatus != nil { + in, out := &in.ClusterStatus, &out.ClusterStatus + *out = make([]FederatedServiceClusterStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedServiceStatus. 
+func (in *FederatedServiceStatus) DeepCopy() *FederatedServiceStatus { + if in == nil { + return nil + } + out := new(FederatedServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FederatedServiceStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedServiceStatusList) DeepCopyInto(out *FederatedServiceStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FederatedServiceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedServiceStatusList. +func (in *FederatedServiceStatusList) DeepCopy() *FederatedServiceStatusList { + if in == nil { + return nil + } + out := new(FederatedServiceStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FederatedServiceStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedVersion) DeepCopyInto(out *PropagatedVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedVersion. 
+func (in *PropagatedVersion) DeepCopy() *PropagatedVersion { + if in == nil { + return nil + } + out := new(PropagatedVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PropagatedVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedVersionList) DeepCopyInto(out *PropagatedVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PropagatedVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedVersionList. +func (in *PropagatedVersionList) DeepCopy() *PropagatedVersionList { + if in == nil { + return nil + } + out := new(PropagatedVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PropagatedVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedVersionSpec) DeepCopyInto(out *PropagatedVersionSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedVersionSpec. 
+func (in *PropagatedVersionSpec) DeepCopy() *PropagatedVersionSpec { + if in == nil { + return nil + } + out := new(PropagatedVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropagatedVersionStatus) DeepCopyInto(out *PropagatedVersionStatus) { + *out = *in + if in.ClusterVersions != nil { + in, out := &in.ClusterVersions, &out.ClusterVersions + *out = make([]ClusterObjectVersion, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropagatedVersionStatus. +func (in *PropagatedVersionStatus) DeepCopy() *PropagatedVersionStatus { + if in == nil { + return nil + } + out := new(PropagatedVersionStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/federatedtypeconfig_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/federatedtypeconfig_types.go new file mode 100644 index 000000000..f7c7d62ec --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/federatedtypeconfig_types.go @@ -0,0 +1,257 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + "strings" + + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/kubefed/pkg/apis/core/common" +) + +// FederatedTypeConfigSpec defines the desired state of FederatedTypeConfig. +type FederatedTypeConfigSpec struct { + // The configuration of the target type. If not set, the pluralName and + // groupName fields will be set from the metadata.name of this resource. The + // kind field must be set. + TargetType APIResource `json:"targetType"` + // Whether or not propagation to member clusters should be enabled. + Propagation PropagationMode `json:"propagation"` + // Configuration for the federated type that defines (via + // template, placement and overrides fields) how the target type + // should appear in multiple cluster. + FederatedType APIResource `json:"federatedType"` + // Configuration for the status type that holds information about which type + // holds the status of the federated resource. If not provided, the group + // and version will default to those provided for the federated type api + // resource. + // +optional + StatusType *APIResource `json:"statusType,omitempty"` + // Whether or not Status object should be populated. + // +optional + StatusCollection *StatusCollectionMode `json:"statusCollection,omitempty"` +} + +// APIResource defines how to configure the dynamic client for an API resource. +type APIResource struct { + // metav1.GroupVersion is not used since the json annotation of + // the fields enforces them as mandatory. + + // Group of the resource. + // +optional + Group string `json:"group,omitempty"` + // Version of the resource. + Version string `json:"version"` + // Camel-cased singular name of the resource (e.g. ConfigMap) + Kind string `json:"kind"` + // Lower-cased plural name of the resource (e.g. configmaps). 
If + // not provided, it will be computed by lower-casing the kind and + // suffixing an 's'. + PluralName string `json:"pluralName"` + // Scope of the resource. + Scope apiextv1b1.ResourceScope `json:"scope"` +} + +// PropagationMode defines the state of propagation to member clusters. +type PropagationMode string + +const ( + PropagationEnabled PropagationMode = "Enabled" + PropagationDisabled PropagationMode = "Disabled" +) + +// StatusCollectionMode defines the state of status collection. +type StatusCollectionMode string + +const ( + StatusCollectionEnabled StatusCollectionMode = "Enabled" + StatusCollectionDisabled StatusCollectionMode = "Disabled" +) + +// ControllerStatus defines the current state of the controller +type ControllerStatus string + +const ( + // ControllerStatusRunning means controller is in "running" state + ControllerStatusRunning ControllerStatus = "Running" + // ControllerStatusNotRunning means controller is in "notrunning" state + ControllerStatusNotRunning ControllerStatus = "NotRunning" +) + +// FederatedTypeConfigStatus defines the observed state of FederatedTypeConfig +type FederatedTypeConfigStatus struct { + // ObservedGeneration is the generation as observed by the controller consuming the FederatedTypeConfig. + ObservedGeneration int64 `json:"observedGeneration"` + // PropagationController tracks the status of the sync controller. + PropagationController ControllerStatus `json:"propagationController"` + // StatusController tracks the status of the status controller. + // +optional + StatusController *ControllerStatus `json:"statusController,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=federatedtypeconfigs,shortName=ftc +// +kubebuilder:subresource:status + +// FederatedTypeConfig programs KubeFed to know about a single API type - the +// "target type" - that a user wants to federate. 
For each target type, there is +// a corresponding FederatedType that has the following fields: +// +// - The "template" field specifies the basic definition of a federated resource +// - The "placement" field specifies the placement information for the federated +// resource +// - The "overrides" field specifies how the target resource should vary across +// clusters. +type FederatedTypeConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FederatedTypeConfigSpec `json:"spec"` + // +optional + Status FederatedTypeConfigStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// FederatedTypeConfigList contains a list of FederatedTypeConfig +type FederatedTypeConfigList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []FederatedTypeConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&FederatedTypeConfig{}, &FederatedTypeConfigList{}) +} + +func SetFederatedTypeConfigDefaults(obj *FederatedTypeConfig) { + // TODO(marun) will name always be populated? + nameParts := strings.SplitN(obj.Name, ".", 2) + targetPluralName := nameParts[0] + setStringDefault(&obj.Spec.TargetType.PluralName, targetPluralName) + if len(nameParts) > 1 { + group := nameParts[1] + setStringDefault(&obj.Spec.TargetType.Group, group) + } + setStringDefault(&obj.Spec.FederatedType.PluralName, PluralName(obj.Spec.FederatedType.Kind)) + if obj.Spec.StatusType != nil { + setStringDefault(&obj.Spec.StatusType.PluralName, PluralName(obj.Spec.StatusType.Kind)) + setStringDefault(&obj.Spec.StatusType.Group, obj.Spec.FederatedType.Group) + setStringDefault(&obj.Spec.StatusType.Version, obj.Spec.FederatedType.Version) + } +} + +// GetDefaultedString returns the value if provided, and otherwise +// returns the provided default. 
+func setStringDefault(value *string, defaultValue string) { + if value == nil || len(*value) > 0 { + return + } + *value = defaultValue +} + +// PluralName computes the plural name from the kind by +// lowercasing and suffixing with 's' or `es`. +func PluralName(kind string) string { + lowerKind := strings.ToLower(kind) + if strings.HasSuffix(lowerKind, "s") || strings.HasSuffix(lowerKind, "x") || + strings.HasSuffix(lowerKind, "ch") || strings.HasSuffix(lowerKind, "sh") || + strings.HasSuffix(lowerKind, "z") || strings.HasSuffix(lowerKind, "o") { + return fmt.Sprintf("%ses", lowerKind) + } + if strings.HasSuffix(lowerKind, "y") { + lowerKind = strings.TrimSuffix(lowerKind, "y") + return fmt.Sprintf("%sies", lowerKind) + } + return fmt.Sprintf("%ss", lowerKind) +} + +func (f *FederatedTypeConfig) GetObjectMeta() metav1.ObjectMeta { + return f.ObjectMeta +} + +func (f *FederatedTypeConfig) GetTargetType() metav1.APIResource { + return apiResourceToMeta(f.Spec.TargetType, f.GetNamespaced()) +} + +// TODO(font): This method should be removed from the interface in favor of +// checking the namespaced property of the appropriate APIResource (TargetType, +// FederatedType) depending on context. 
+func (f *FederatedTypeConfig) GetNamespaced() bool { + return f.Spec.TargetType.Namespaced() +} + +func (f *FederatedTypeConfig) GetPropagationEnabled() bool { + return f.Spec.Propagation == PropagationEnabled +} + +func (f *FederatedTypeConfig) GetFederatedType() metav1.APIResource { + return apiResourceToMeta(f.Spec.FederatedType, f.GetFederatedNamespaced()) +} + +func (f *FederatedTypeConfig) GetStatusType() *metav1.APIResource { + if f.Spec.StatusType == nil { + return nil + } + metaAPIResource := apiResourceToMeta(*f.Spec.StatusType, f.Spec.StatusType.Namespaced()) + return &metaAPIResource +} + +func (f *FederatedTypeConfig) GetStatusEnabled() bool { + return f.Spec.StatusCollection != nil && + *f.Spec.StatusCollection == StatusCollectionEnabled && + f.Name == "services" +} + +// TODO(font): This method should be removed from the interface i.e. remove +// special-case handling for namespaces, in favor of checking the namespaced +// property of the appropriate APIResource (TargetType, FederatedType) +// depending on context. +func (f *FederatedTypeConfig) GetFederatedNamespaced() bool { + // Special-case the scope of federated namespace since it will + // hopefully be the only instance of the scope of a federated + // type differing from the scope of its target. + + if f.IsNamespace() { + // FederatedNamespace is namespaced to allow the control plane to run + // with only namespace-scoped permissions e.g. to determine placement. 
+ return true + } + return f.GetNamespaced() +} + +func (f *FederatedTypeConfig) IsNamespace() bool { + return f.Name == common.NamespaceName +} + +func (a *APIResource) Namespaced() bool { + return a.Scope == apiextv1b1.NamespaceScoped +} + +func apiResourceToMeta(apiResource APIResource, namespaced bool) metav1.APIResource { + return metav1.APIResource{ + Group: apiResource.Group, + Version: apiResource.Version, + Kind: apiResource.Kind, + Name: apiResource.PluralName, + Namespaced: namespaced, + } +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/groupversion_info.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/groupversion_info.go new file mode 100644 index 000000000..a3503e6f5 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/groupversion_info.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1beta1 contains API Schema definitions for the core v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=core.kubefed.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "core.kubefed.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedcluster_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedcluster_types.go new file mode 100644 index 000000000..04da57488 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedcluster_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/kubefed/pkg/apis/core/common" +) + +type TLSValidation string + +const ( + TLSAll TLSValidation = "*" + TLSSubjectName TLSValidation = "SubjectName" + TLSValidityPeriod TLSValidation = "ValidityPeriod" +) + +// KubeFedClusterSpec defines the desired state of KubeFedCluster +type KubeFedClusterSpec struct { + // The API endpoint of the member cluster. This can be a hostname, + // hostname:port, IP or IP:port. + APIEndpoint string `json:"apiEndpoint"` + + // CABundle contains the certificate authority information. + // +optional + CABundle []byte `json:"caBundle,omitempty"` + + // Name of the secret containing the token required to access the + // member cluster. The secret needs to exist in the same namespace + // as the control plane and should have a "token" key. + SecretRef LocalSecretReference `json:"secretRef"` + + // DisabledTLSValidations defines a list of checks to ignore when validating + // the TLS connection to the member cluster. This can be any of *, SubjectName, or ValidityPeriod. + // If * is specified, it is expected to be the only option in list. + // +optional + DisabledTLSValidations []TLSValidation `json:"disabledTLSValidations,omitempty"` +} + +// LocalSecretReference is a reference to a secret within the enclosing +// namespace. +type LocalSecretReference struct { + // Name of a secret within the enclosing + // namespace + Name string `json:"name"` +} + +// KubeFedClusterStatus contains information about the current status of a +// cluster updated periodically by cluster controller. +type KubeFedClusterStatus struct { + // Conditions is an array of current cluster conditions. + Conditions []ClusterCondition `json:"conditions"` + // Zones are the names of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. 
+ // +optional + Zones []string `json:"zones,omitempty"` + // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. + // +optional + Region *string `json:"region,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:name=age,type=date,JSONPath=.metadata.creationTimestamp +// +kubebuilder:printcolumn:name=ready,type=string,JSONPath=.status.conditions[?(@.type=='Ready')].status +// +kubebuilder:resource:path=kubefedclusters +// +kubebuilder:subresource:status + +// KubeFedCluster configures KubeFed to be aware of a Kubernetes +// cluster and encapsulates the details necessary to communicate with +// the cluster. +type KubeFedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KubeFedClusterSpec `json:"spec"` + // +optional + Status KubeFedClusterStatus `json:"status,omitempty"` +} + +// ClusterCondition describes current state of a cluster. +type ClusterCondition struct { + // Type of cluster condition, Ready or Offline. + Type common.ClusterConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status apiv1.ConditionStatus `json:"status"` + // Last time the condition was checked. + LastProbeTime metav1.Time `json:"lastProbeTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + // (brief) reason for the condition's last transition. + // +optional + Reason *string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. 
+ // +optional + Message *string `json:"message,omitempty"` +} + +// +kubebuilder:object:root=true + +// KubeFedClusterList contains a list of KubeFedCluster +type KubeFedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubeFedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&KubeFedCluster{}, &KubeFedClusterList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedconfig_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedconfig_types.go new file mode 100644 index 000000000..a05af4bef --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/kubefedconfig_types.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// KubeFedConfigSpec defines the desired state of KubeFedConfig +type KubeFedConfigSpec struct { + // The scope of the KubeFed control plane should be either + // `Namespaced` or `Cluster`. `Namespaced` indicates that the + // KubeFed namespace will be the only target of the control plane. 
+ Scope apiextv1b1.ResourceScope `json:"scope"` + // +optional + ControllerDuration *DurationConfig `json:"controllerDuration,omitempty"` + // +optional + LeaderElect *LeaderElectConfig `json:"leaderElect,omitempty"` + // +optional + FeatureGates []FeatureGatesConfig `json:"featureGates,omitempty"` + // +optional + ClusterHealthCheck *ClusterHealthCheckConfig `json:"clusterHealthCheck,omitempty"` + // +optional + SyncController *SyncControllerConfig `json:"syncController,omitempty"` +} + +type DurationConfig struct { + // Time to wait before reconciling on a healthy cluster. + // +optional + AvailableDelay *metav1.Duration `json:"availableDelay,omitempty"` + // Time to wait before giving up on an unhealthy cluster. + // +optional + UnavailableDelay *metav1.Duration `json:"unavailableDelay,omitempty"` +} +type LeaderElectConfig struct { + // The duration that non-leader candidates will wait after observing a leadership + // renewal until attempting to acquire leadership of a led but unrenewed leader + // slot. This is effectively the maximum duration that a leader can be stopped + // before it is replaced by another candidate. This is only applicable if leader + // election is enabled. + // +optional + LeaseDuration *metav1.Duration `json:"leaseDuration,omitempty"` + // The interval between attempts by the acting master to renew a leadership slot + // before it stops leading. This must be less than or equal to the lease duration. + // This is only applicable if leader election is enabled. + // +optional + RenewDeadline *metav1.Duration `json:"renewDeadline,omitempty"` + // The duration the clients should wait between attempting acquisition and renewal + // of a leadership. This is only applicable if leader election is enabled. + // +optional + RetryPeriod *metav1.Duration `json:"retryPeriod,omitempty"` + // The type of resource object that is used for locking during + // leader election. Supported options are `configmaps` (default) and `endpoints`. 
+ // +optional + ResourceLock *ResourceLockType `json:"resourceLock,omitempty"` +} + +type ResourceLockType string + +const ( + ConfigMapsResourceLock ResourceLockType = "configmaps" + EndpointsResourceLock ResourceLockType = "endpoints" +) + +type FeatureGatesConfig struct { + Name string `json:"name"` + Configuration ConfigurationMode `json:"configuration"` +} + +type ConfigurationMode string + +const ( + ConfigurationEnabled ConfigurationMode = "Enabled" + ConfigurationDisabled ConfigurationMode = "Disabled" +) + +type ClusterHealthCheckConfig struct { + // How often to monitor the cluster health. + // +optional + Period *metav1.Duration `json:"period,omitempty"` + // Minimum consecutive failures for the cluster health to be considered failed after having succeeded. + // +optional + FailureThreshold *int64 `json:"failureThreshold,omitempty"` + // Minimum consecutive successes for the cluster health to be considered successful after having failed. + // +optional + SuccessThreshold *int64 `json:"successThreshold,omitempty"` + // Duration after which the cluster health check times out. + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` +} + +type SyncControllerConfig struct { + // Whether to adopt pre-existing resources in member clusters. Defaults to + // "Enabled". 
+ // +optional + AdoptResources *ResourceAdoption `json:"adoptResources,omitempty"` +} + +type ResourceAdoption string + +const ( + AdoptResourcesEnabled ResourceAdoption = "Enabled" + AdoptResourcesDisabled ResourceAdoption = "Disabled" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kubefedconfigs + +type KubeFedConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KubeFedConfigSpec `json:"spec"` +} + +// +kubebuilder:object:root=true + +// KubeFedConfigList contains a list of KubeFedConfig +type KubeFedConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubeFedConfig `json:"items"` +} + +func init() { + SchemeBuilder.Register(&KubeFedConfig{}, &KubeFedConfigList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..694be4fa2 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,537 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResource) DeepCopyInto(out *APIResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource. +func (in *APIResource) DeepCopy() *APIResource { + if in == nil { + return nil + } + out := new(APIResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(string) + **out = **in + } + if in.Message != nil { + in, out := &in.Message, &out.Message + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterHealthCheckConfig) DeepCopyInto(out *ClusterHealthCheckConfig) { + *out = *in + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(v1.Duration) + **out = **in + } + if in.FailureThreshold != nil { + in, out := &in.FailureThreshold, &out.FailureThreshold + *out = new(int64) + **out = **in + } + if in.SuccessThreshold != nil { + in, out := &in.SuccessThreshold, &out.SuccessThreshold + *out = new(int64) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterHealthCheckConfig. +func (in *ClusterHealthCheckConfig) DeepCopy() *ClusterHealthCheckConfig { + if in == nil { + return nil + } + out := new(ClusterHealthCheckConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DurationConfig) DeepCopyInto(out *DurationConfig) { + *out = *in + if in.AvailableDelay != nil { + in, out := &in.AvailableDelay, &out.AvailableDelay + *out = new(v1.Duration) + **out = **in + } + if in.UnavailableDelay != nil { + in, out := &in.UnavailableDelay, &out.UnavailableDelay + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DurationConfig. +func (in *DurationConfig) DeepCopy() *DurationConfig { + if in == nil { + return nil + } + out := new(DurationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGatesConfig) DeepCopyInto(out *FeatureGatesConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGatesConfig. 
+func (in *FeatureGatesConfig) DeepCopy() *FeatureGatesConfig { + if in == nil { + return nil + } + out := new(FeatureGatesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedTypeConfig) DeepCopyInto(out *FederatedTypeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTypeConfig. +func (in *FederatedTypeConfig) DeepCopy() *FederatedTypeConfig { + if in == nil { + return nil + } + out := new(FederatedTypeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FederatedTypeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedTypeConfigList) DeepCopyInto(out *FederatedTypeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FederatedTypeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTypeConfigList. +func (in *FederatedTypeConfigList) DeepCopy() *FederatedTypeConfigList { + if in == nil { + return nil + } + out := new(FederatedTypeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FederatedTypeConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedTypeConfigSpec) DeepCopyInto(out *FederatedTypeConfigSpec) { + *out = *in + out.TargetType = in.TargetType + out.FederatedType = in.FederatedType + if in.StatusType != nil { + in, out := &in.StatusType, &out.StatusType + *out = new(APIResource) + **out = **in + } + if in.StatusCollection != nil { + in, out := &in.StatusCollection, &out.StatusCollection + *out = new(StatusCollectionMode) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTypeConfigSpec. +func (in *FederatedTypeConfigSpec) DeepCopy() *FederatedTypeConfigSpec { + if in == nil { + return nil + } + out := new(FederatedTypeConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FederatedTypeConfigStatus) DeepCopyInto(out *FederatedTypeConfigStatus) { + *out = *in + if in.StatusController != nil { + in, out := &in.StatusController, &out.StatusController + *out = new(ControllerStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedTypeConfigStatus. +func (in *FederatedTypeConfigStatus) DeepCopy() *FederatedTypeConfigStatus { + if in == nil { + return nil + } + out := new(FederatedTypeConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeFedCluster) DeepCopyInto(out *KubeFedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedCluster. +func (in *KubeFedCluster) DeepCopy() *KubeFedCluster { + if in == nil { + return nil + } + out := new(KubeFedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeFedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeFedClusterList) DeepCopyInto(out *KubeFedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeFedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedClusterList. +func (in *KubeFedClusterList) DeepCopy() *KubeFedClusterList { + if in == nil { + return nil + } + out := new(KubeFedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeFedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeFedClusterSpec) DeepCopyInto(out *KubeFedClusterSpec) { + *out = *in + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.SecretRef = in.SecretRef + if in.DisabledTLSValidations != nil { + in, out := &in.DisabledTLSValidations, &out.DisabledTLSValidations + *out = make([]TLSValidation, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedClusterSpec. +func (in *KubeFedClusterSpec) DeepCopy() *KubeFedClusterSpec { + if in == nil { + return nil + } + out := new(KubeFedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeFedClusterStatus) DeepCopyInto(out *KubeFedClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedClusterStatus. +func (in *KubeFedClusterStatus) DeepCopy() *KubeFedClusterStatus { + if in == nil { + return nil + } + out := new(KubeFedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeFedConfig) DeepCopyInto(out *KubeFedConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedConfig. 
+func (in *KubeFedConfig) DeepCopy() *KubeFedConfig { + if in == nil { + return nil + } + out := new(KubeFedConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeFedConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeFedConfigList) DeepCopyInto(out *KubeFedConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeFedConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedConfigList. +func (in *KubeFedConfigList) DeepCopy() *KubeFedConfigList { + if in == nil { + return nil + } + out := new(KubeFedConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeFedConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeFedConfigSpec) DeepCopyInto(out *KubeFedConfigSpec) { + *out = *in + if in.ControllerDuration != nil { + in, out := &in.ControllerDuration, &out.ControllerDuration + *out = new(DurationConfig) + (*in).DeepCopyInto(*out) + } + if in.LeaderElect != nil { + in, out := &in.LeaderElect, &out.LeaderElect + *out = new(LeaderElectConfig) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]FeatureGatesConfig, len(*in)) + copy(*out, *in) + } + if in.ClusterHealthCheck != nil { + in, out := &in.ClusterHealthCheck, &out.ClusterHealthCheck + *out = new(ClusterHealthCheckConfig) + (*in).DeepCopyInto(*out) + } + if in.SyncController != nil { + in, out := &in.SyncController, &out.SyncController + *out = new(SyncControllerConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedConfigSpec. +func (in *KubeFedConfigSpec) DeepCopy() *KubeFedConfigSpec { + if in == nil { + return nil + } + out := new(KubeFedConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElectConfig) DeepCopyInto(out *LeaderElectConfig) { + *out = *in + if in.LeaseDuration != nil { + in, out := &in.LeaseDuration, &out.LeaseDuration + *out = new(v1.Duration) + **out = **in + } + if in.RenewDeadline != nil { + in, out := &in.RenewDeadline, &out.RenewDeadline + *out = new(v1.Duration) + **out = **in + } + if in.RetryPeriod != nil { + in, out := &in.RetryPeriod, &out.RetryPeriod + *out = new(v1.Duration) + **out = **in + } + if in.ResourceLock != nil { + in, out := &in.ResourceLock, &out.ResourceLock + *out = new(ResourceLockType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectConfig. 
+func (in *LeaderElectConfig) DeepCopy() *LeaderElectConfig { + if in == nil { + return nil + } + out := new(LeaderElectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretReference. +func (in *LocalSecretReference) DeepCopy() *LocalSecretReference { + if in == nil { + return nil + } + out := new(LocalSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncControllerConfig) DeepCopyInto(out *SyncControllerConfig) { + *out = *in + if in.AdoptResources != nil { + in, out := &in.AdoptResources, &out.AdoptResources + *out = new(ResourceAdoption) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncControllerConfig. +func (in *SyncControllerConfig) DeepCopy() *SyncControllerConfig { + if in == nil { + return nil + } + out := new(SyncControllerConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/dnsendpoint_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/dnsendpoint_types.go new file mode 100644 index 000000000..9ad739ca7 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/dnsendpoint_types.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Targets is a representation of a list of targets for an endpoint. +type Targets []string + +// TTL is a structure defining the TTL of a DNS record +type TTL int64 + +// Labels store metadata related to the endpoint +// it is then stored in a persistent storage via serialization +type Labels map[string]string + +// Endpoint is a high-level association between a service and an IP. +type Endpoint struct { + // The FQDN of the DNS record. + DNSName string `json:"dnsName,omitempty"` + // The targets that the DNS record points to. + Targets Targets `json:"targets,omitempty"` + // RecordType type of record, e.g. CNAME, A, SRV, TXT etc. + RecordType string `json:"recordType,omitempty"` + // TTL for the record in seconds. + RecordTTL TTL `json:"recordTTL,omitempty"` + // Labels stores labels defined for the Endpoint. + // +optional + Labels Labels `json:"labels,omitempty"` +} + +// DNSEndpointSpec defines the desired state of DNSEndpoint +type DNSEndpointSpec struct { + Endpoints []*Endpoint `json:"endpoints,omitempty"` +} + +// DNSEndpointStatus defines the observed state of DNSEndpoint +type DNSEndpointStatus struct { + // ObservedGeneration is the generation as observed by the controller consuming the DNSEndpoint. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnsendpoints +// +kubebuilder:subresource:status + +// DNSEndpoint is the CRD wrapper for Endpoint which is designed to act as a +// source of truth for external-dns. +type DNSEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DNSEndpointSpec `json:"spec,omitempty"` + Status DNSEndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// DNSEndpointList contains a list of DNSEndpoint +type DNSEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DNSEndpoint `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DNSEndpoint{}, &DNSEndpointList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/domain_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/domain_types.go new file mode 100644 index 000000000..b28ac1be1 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/domain_types.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=domains + +type Domain struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Domain is the DNS zone associated with the KubeFed control plane + Domain string `json:"domain"` + // NameServer is the authoritative DNS name server for the KubeFed domain + NameServer string `json:"nameServer,omitempty"` +} + +// +kubebuilder:object:root=true + +// DomainList contains a list of Domain +type DomainList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Domain `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Domain{}, &DomainList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/groupversion_info.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..f3dab7fcc --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/groupversion_info.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the multiclusterdns v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=multiclusterdns.kubefed.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "multiclusterdns.kubefed.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/ingressdnsrecord_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/ingressdnsrecord_types.go new file mode 100644 index 000000000..7a7c8a7c7 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/ingressdnsrecord_types.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// IngressDNSRecordSpec defines the desired state of IngressDNSRecord +type IngressDNSRecordSpec struct { + // Host from the IngressRule in Cluster Ingress Spec + Hosts []string `json:"hosts,omitempty"` + // RecordTTL is the TTL in seconds for DNS records created for the Ingress, if omitted a default would be used + RecordTTL TTL `json:"recordTTL,omitempty"` +} + +// IngressDNSRecordStatus defines the observed state of IngressDNSRecord +type IngressDNSRecordStatus struct { + // Array of Ingress Controller LoadBalancers + DNS []ClusterIngressDNS `json:"dns,omitempty"` +} + +// ClusterIngressDNS defines the observed status of Ingress within a cluster. +type ClusterIngressDNS struct { + // Cluster name + Cluster string `json:"cluster,omitempty"` + // LoadBalancer for the corresponding ingress controller + LoadBalancer corev1.LoadBalancerStatus `json:"loadBalancer,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=ingressdnsrecords +// +kubebuilder:subresource:status + +type IngressDNSRecord struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IngressDNSRecordSpec `json:"spec,omitempty"` + Status IngressDNSRecordStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// IngressDNSRecordList contains a list of IngressDNSRecord +type IngressDNSRecordList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IngressDNSRecord `json:"items"` +} + +func init() { + SchemeBuilder.Register(&IngressDNSRecord{}, &IngressDNSRecordList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/servicednsrecord_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/servicednsrecord_types.go new file mode 100644 index 000000000..2d920c0e2 --- /dev/null +++ 
b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/servicednsrecord_types.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServiceDNSRecordSpec defines the desired state of ServiceDNSRecord. +type ServiceDNSRecordSpec struct { + // DomainRef is the name of the domain object to which the corresponding federated service belongs + DomainRef string `json:"domainRef"` + // RecordTTL is the TTL in seconds for DNS records created for this Service, if omitted a default would be used + RecordTTL TTL `json:"recordTTL,omitempty"` + // DNSPrefix when specified, an additional DNS record would be created with . + DNSPrefix string `json:"dnsPrefix,omitempty"` + // ExternalName when specified, replaces the service name portion of a resource record + // with the value of ExternalName. + ExternalName string `json:"externalName,omitempty"` + // AllowServiceWithoutEndpoints allows DNS records to be written for Service shards without endpoints + AllowServiceWithoutEndpoints bool `json:"allowServiceWithoutEndpoints,omitempty"` +} + +// ServiceDNSRecordStatus defines the observed state of ServiceDNSRecord. 
+type ServiceDNSRecordStatus struct { + // Domain is the DNS domain of the KubeFed control plane as in Domain API + Domain string `json:"domain,omitempty"` + DNS []ClusterDNS `json:"dns,omitempty"` +} + +// ClusterDNS defines the observed status of LoadBalancer within a cluster. +type ClusterDNS struct { + // Cluster name + Cluster string `json:"cluster,omitempty"` + // LoadBalancer for the corresponding service + LoadBalancer corev1.LoadBalancerStatus `json:"loadBalancer,omitempty"` + // Zones to which the cluster belongs + Zones []string `json:"zones,omitempty"` + // Region to which the cluster belongs + Region string `json:"region,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceDNSRecord defines a scheme of DNS name and subdomains that +// should be programmed with endpoint information about a Service deployed in +// multiple Kubernetes clusters. ServiceDNSRecord is name-associated +// with the Services it programs endpoint information for, meaning that a +// ServiceDNSRecord expresses the intent to program DNS with +// information about endpoints for the Kubernetes Service resources with the +// same name and namespace in different clusters. +// +// For the example, given the following values: +// +// metadata.name: test-service +// metadata.namespace: test-namespace +// spec.federationName: test-federation +// +// the following set of DNS names will be programmed: +// +// Global Level: test-service.test-namespace.test-federation.svc. +// Region Level: test-service.test-namespace.test-federation.svc.(status.DNS[*].region). +// Zone Level : test-service.test-namespace.test-federation.svc.(status.DNS[*].zone).(status.DNS[*].region). +// +// Optionally, when DNSPrefix is specified, another DNS name will be programmed +// which would be a CNAME record pointing to DNS name at global level as below: +// . --> test-service.test-namespace.test-federation.svc. 
+// +// +k8s:openapi-gen=true +// +kubebuilder:resource:path=servicednsrecords +// +kubebuilder:subresource:status +type ServiceDNSRecord struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ServiceDNSRecordSpec `json:"spec,omitempty"` + Status ServiceDNSRecordStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceDNSRecordList contains a list of ServiceDNSRecord +type ServiceDNSRecordList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServiceDNSRecord `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ServiceDNSRecord{}, &ServiceDNSRecordList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..2cfa792f5 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,483 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterDNS) DeepCopyInto(out *ClusterDNS) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDNS. +func (in *ClusterDNS) DeepCopy() *ClusterDNS { + if in == nil { + return nil + } + out := new(ClusterDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterIngressDNS) DeepCopyInto(out *ClusterIngressDNS) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIngressDNS. +func (in *ClusterIngressDNS) DeepCopy() *ClusterIngressDNS { + if in == nil { + return nil + } + out := new(ClusterIngressDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpoint) DeepCopyInto(out *DNSEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpoint. +func (in *DNSEndpoint) DeepCopy() *DNSEndpoint { + if in == nil { + return nil + } + out := new(DNSEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSEndpointList) DeepCopyInto(out *DNSEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointList. +func (in *DNSEndpointList) DeepCopy() *DNSEndpointList { + if in == nil { + return nil + } + out := new(DNSEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpointSpec) DeepCopyInto(out *DNSEndpointSpec) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]*Endpoint, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointSpec. +func (in *DNSEndpointSpec) DeepCopy() *DNSEndpointSpec { + if in == nil { + return nil + } + out := new(DNSEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSEndpointStatus) DeepCopyInto(out *DNSEndpointStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointStatus. 
+func (in *DNSEndpointStatus) DeepCopy() *DNSEndpointStatus { + if in == nil { + return nil + } + out := new(DNSEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. +func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Domain) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainList) DeepCopyInto(out *DomainList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Domain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList. +func (in *DomainList) DeepCopy() *DomainList { + if in == nil { + return nil + } + out := new(DomainList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DomainList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Endpoint) DeepCopyInto(out *Endpoint) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make(Targets, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. +func (in *Endpoint) DeepCopy() *Endpoint { + if in == nil { + return nil + } + out := new(Endpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressDNSRecord) DeepCopyInto(out *IngressDNSRecord) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecord. +func (in *IngressDNSRecord) DeepCopy() *IngressDNSRecord { + if in == nil { + return nil + } + out := new(IngressDNSRecord) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressDNSRecord) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressDNSRecordList) DeepCopyInto(out *IngressDNSRecordList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressDNSRecord, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordList. +func (in *IngressDNSRecordList) DeepCopy() *IngressDNSRecordList { + if in == nil { + return nil + } + out := new(IngressDNSRecordList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressDNSRecordList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressDNSRecordSpec) DeepCopyInto(out *IngressDNSRecordSpec) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordSpec. +func (in *IngressDNSRecordSpec) DeepCopy() *IngressDNSRecordSpec { + if in == nil { + return nil + } + out := new(IngressDNSRecordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressDNSRecordStatus) DeepCopyInto(out *IngressDNSRecordStatus) { + *out = *in + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]ClusterIngressDNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordStatus. 
+func (in *IngressDNSRecordStatus) DeepCopy() *IngressDNSRecordStatus { + if in == nil { + return nil + } + out := new(IngressDNSRecordStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Labels) DeepCopyInto(out *Labels) { + { + in := &in + *out = make(Labels, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Labels. +func (in Labels) DeepCopy() Labels { + if in == nil { + return nil + } + out := new(Labels) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDNSRecord) DeepCopyInto(out *ServiceDNSRecord) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecord. +func (in *ServiceDNSRecord) DeepCopy() *ServiceDNSRecord { + if in == nil { + return nil + } + out := new(ServiceDNSRecord) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceDNSRecord) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceDNSRecordList) DeepCopyInto(out *ServiceDNSRecordList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceDNSRecord, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordList. +func (in *ServiceDNSRecordList) DeepCopy() *ServiceDNSRecordList { + if in == nil { + return nil + } + out := new(ServiceDNSRecordList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceDNSRecordList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDNSRecordSpec) DeepCopyInto(out *ServiceDNSRecordSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordSpec. +func (in *ServiceDNSRecordSpec) DeepCopy() *ServiceDNSRecordSpec { + if in == nil { + return nil + } + out := new(ServiceDNSRecordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceDNSRecordStatus) DeepCopyInto(out *ServiceDNSRecordStatus) { + *out = *in + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]ClusterDNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordStatus. 
+func (in *ServiceDNSRecordStatus) DeepCopy() *ServiceDNSRecordStatus { + if in == nil { + return nil + } + out := new(ServiceDNSRecordStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Targets) DeepCopyInto(out *Targets) { + { + in := &in + *out = make(Targets, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targets. +func (in Targets) DeepCopy() Targets { + if in == nil { + return nil + } + out := new(Targets) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/groupversion_info.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/groupversion_info.go new file mode 100644 index 000000000..fbde7b768 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/groupversion_info.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1alpha1 contains API Schema definitions for the scheduling v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=scheduling.kubefed.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "scheduling.kubefed.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/replicaschedulingpreference_types.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/replicaschedulingpreference_types.go new file mode 100644 index 000000000..adffd1841 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/replicaschedulingpreference_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ReplicaSchedulingPreferenceSpec defines the desired state of ReplicaSchedulingPreference +type ReplicaSchedulingPreferenceSpec struct { + //TODO (@irfanurrehman); upgrade this to label selector only if need be. + // The idea of this API is to have a a set of preferences which can + // be used for a target FederatedDeployment or FederatedReplicaset. + // Although the set of preferences in question can be applied to multiple + // target objects using label selectors, but there are no clear advantages + // of doing that as of now. + // To keep the implementation and usage simple, matching ns/name of RSP + // resource to the target resource is sufficient and only additional information + // needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset). + TargetKind string `json:"targetKind"` + + // Total number of pods desired across federated clusters. + // Replicas specified in the spec for target deployment template or replicaset + // template will be discarded/overridden when scheduling preferences are + // specified. + TotalReplicas int32 `json:"totalReplicas"` + + // If set to true then already scheduled and running replicas may be moved to other clusters + // in order to match current state to the specified preferences. Otherwise, if set to false, + // up and running replicas will not be moved. + // +optional + Rebalance bool `json:"rebalance,omitempty"` + + // A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in + // these clusters. + // "*" (if provided) applies to all clusters if an explicit mapping is not provided. + // If omitted, clusters without explicit preferences should not have any replicas scheduled. + // +optional + Clusters map[string]ClusterPreferences `json:"clusters,omitempty"` +} + +// Preferences regarding number of replicas assigned to a cluster workload object (dep, rs, ..) 
within +// a federated workload object. +type ClusterPreferences struct { + // Minimum number of replicas that should be assigned to this cluster workload object. 0 by default. + // +optional + MinReplicas int64 `json:"minReplicas,omitempty"` + + // Maximum number of replicas that should be assigned to this cluster workload object. + // Unbounded if no value provided (default). + // +optional + MaxReplicas *int64 `json:"maxReplicas,omitempty"` + + // A number expressing the preference to put an additional replica to this cluster workload object. + // 0 by default. + Weight int64 `json:"weight,omitempty"` +} + +// ReplicaSchedulingPreferenceStatus defines the observed state of ReplicaSchedulingPreference +type ReplicaSchedulingPreferenceStatus struct { +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=replicaschedulingpreferences + +type ReplicaSchedulingPreference struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ReplicaSchedulingPreferenceSpec `json:"spec,omitempty"` + Status ReplicaSchedulingPreferenceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ReplicaSchedulingPreferenceList contains a list of ReplicaSchedulingPreference +type ReplicaSchedulingPreferenceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ReplicaSchedulingPreference `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ReplicaSchedulingPreference{}, &ReplicaSchedulingPreferenceList{}) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bbb678e28 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPreferences) DeepCopyInto(out *ClusterPreferences) { + *out = *in + if in.MaxReplicas != nil { + in, out := &in.MaxReplicas, &out.MaxReplicas + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPreferences. +func (in *ClusterPreferences) DeepCopy() *ClusterPreferences { + if in == nil { + return nil + } + out := new(ClusterPreferences) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSchedulingPreference) DeepCopyInto(out *ReplicaSchedulingPreference) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPreference. 
+func (in *ReplicaSchedulingPreference) DeepCopy() *ReplicaSchedulingPreference { + if in == nil { + return nil + } + out := new(ReplicaSchedulingPreference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSchedulingPreference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSchedulingPreferenceList) DeepCopyInto(out *ReplicaSchedulingPreferenceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSchedulingPreference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPreferenceList. +func (in *ReplicaSchedulingPreferenceList) DeepCopy() *ReplicaSchedulingPreferenceList { + if in == nil { + return nil + } + out := new(ReplicaSchedulingPreferenceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSchedulingPreferenceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSchedulingPreferenceSpec) DeepCopyInto(out *ReplicaSchedulingPreferenceSpec) { + *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make(map[string]ClusterPreferences, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPreferenceSpec. +func (in *ReplicaSchedulingPreferenceSpec) DeepCopy() *ReplicaSchedulingPreferenceSpec { + if in == nil { + return nil + } + out := new(ReplicaSchedulingPreferenceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSchedulingPreferenceStatus) DeepCopyInto(out *ReplicaSchedulingPreferenceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPreferenceStatus. +func (in *ReplicaSchedulingPreferenceStatus) DeepCopy() *ReplicaSchedulingPreferenceStatus { + if in == nil { + return nil + } + out := new(ReplicaSchedulingPreferenceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/client/generic/genericclient.go b/vendor/sigs.k8s.io/kubefed/pkg/client/generic/genericclient.go new file mode 100644 index 000000000..eff5602d8 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/client/generic/genericclient.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/kubefed/pkg/client/generic/scheme" +) + +type Client interface { + Create(ctx context.Context, obj runtime.Object) error + Get(ctx context.Context, obj runtime.Object, namespace, name string) error + Update(ctx context.Context, obj runtime.Object) error + Delete(ctx context.Context, obj runtime.Object, namespace, name string) error + List(ctx context.Context, obj runtime.Object, namespace string, opts ...client.ListOption) error + UpdateStatus(ctx context.Context, obj runtime.Object) error +} + +type genericClient struct { + client client.Client +} + +func New(config *rest.Config) (Client, error) { + client, err := client.New(config, client.Options{Scheme: scheme.Scheme}) + return &genericClient{client}, err +} + +func NewForConfigOrDie(config *rest.Config) Client { + client, err := New(config) + if err != nil { + panic(err) + } + return client +} + +func NewForConfigOrDieWithUserAgent(config *rest.Config, userAgent string) Client { + configCopy := rest.CopyConfig(config) + rest.AddUserAgent(configCopy, userAgent) + return NewForConfigOrDie(configCopy) +} + +func (c *genericClient) Create(ctx context.Context, obj runtime.Object) error { + return c.client.Create(ctx, obj) +} + +func (c *genericClient) Get(ctx context.Context, obj runtime.Object, namespace, name string) error { + return c.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj) +} + +func (c *genericClient) Update(ctx context.Context, obj runtime.Object) error { + return c.client.Update(ctx, obj) +} + +func (c *genericClient) Delete(ctx context.Context, obj runtime.Object, namespace, name string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + 
accessor.SetNamespace(namespace) + accessor.SetName(name) + return c.client.Delete(ctx, obj) +} + +func (c *genericClient) List(ctx context.Context, obj runtime.Object, namespace string, opts ...client.ListOption) error { + opts = append(opts, client.InNamespace(namespace)) + return c.client.List(ctx, obj, opts...) +} + +func (c *genericClient) UpdateStatus(ctx context.Context, obj runtime.Object) error { + return c.client.Status().Update(ctx, obj) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/client/generic/scheme/register.go b/vendor/sigs.k8s.io/kubefed/pkg/client/generic/scheme/register.go new file mode 100644 index 000000000..e43268c39 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/client/generic/scheme/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + k8sscheme "k8s.io/client-go/kubernetes/scheme" + + fedapis "sigs.k8s.io/kubefed/pkg/apis" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + fedapis.AddToScheme, + k8sscheme.AddToScheme, +} + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} + +var AddToScheme = localSchemeBuilder.AddToScheme diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/backoff.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/backoff.go new file mode 100644 index 000000000..b5b01213a --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/backoff.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "time" + + "k8s.io/client-go/util/flowcontrol" +) + +func StartBackoffGC(backoff *flowcontrol.Backoff, stopCh <-chan struct{}) { + go func() { + for { + select { + case <-time.After(time.Minute): + backoff.GC() + case <-stopCh: + return + } + } + }() +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/cluster_util.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/cluster_util.go new file mode 100644 index 000000000..dd2a5d73d --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/cluster_util.go @@ -0,0 +1,221 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "crypto/tls" + "crypto/x509" + "net" + "net/http" + "net/url" + "time" + + "github.com/pkg/errors" + + apiv1 "k8s.io/api/core/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/transport" + "k8s.io/klog" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + "sigs.k8s.io/kubefed/pkg/client/generic" +) + +const ( + DefaultKubeFedSystemNamespace = "kube-federation-system" + + KubeAPIQPS = 20.0 + KubeAPIBurst = 30 + TokenKey = "token" + + KubeFedConfigName = "kubefed" +) + +// BuildClusterConfig returns a restclient.Config that can be used to configure +// a client for the given KubeFedCluster or an error. The client is used to +// access kubernetes secrets in the kubefed namespace. 
+func BuildClusterConfig(fedCluster *fedv1b1.KubeFedCluster, client generic.Client, fedNamespace string) (*restclient.Config, error) { + clusterName := fedCluster.Name + + apiEndpoint := fedCluster.Spec.APIEndpoint + // TODO(marun) Remove when validation ensures a non-empty value. + if apiEndpoint == "" { + return nil, errors.Errorf("The api endpoint of cluster %s is empty", clusterName) + } + + secretName := fedCluster.Spec.SecretRef.Name + if secretName == "" { + return nil, errors.Errorf("Cluster %s does not have a secret name", clusterName) + } + secret := &apiv1.Secret{} + err := client.Get(context.TODO(), secret, fedNamespace, secretName) + if err != nil { + return nil, err + } + + token, tokenFound := secret.Data[TokenKey] + if !tokenFound || len(token) == 0 { + return nil, errors.Errorf("The secret for cluster %s is missing a non-empty value for %q", clusterName, TokenKey) + } + + clusterConfig, err := clientcmd.BuildConfigFromFlags(apiEndpoint, "") + if err != nil { + return nil, err + } + clusterConfig.CAData = fedCluster.Spec.CABundle + clusterConfig.BearerToken = string(token) + clusterConfig.QPS = KubeAPIQPS + clusterConfig.Burst = KubeAPIBurst + + if len(fedCluster.Spec.DisabledTLSValidations) != 0 { + klog.V(1).Infof("Cluster %s will use a custom transport for TLS certificate validation", fedCluster.Name) + if err = CustomizeTLSTransport(fedCluster, clusterConfig); err != nil { + return nil, err + } + } + + return clusterConfig, nil +} + +// IsPrimaryCluster checks if the caller is working with objects for the +// primary cluster by checking if the UIDs match for both ObjectMetas passed +// in. +// TODO (font): Need to revisit this when cluster ID is available. 
+func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool { + meta := MetaAccessor(obj) + clusterMeta := MetaAccessor(clusterObj) + return meta.GetUID() == clusterMeta.GetUID() +} + +// CustomizeTLSTransport replaces the restclient.Config.Transport with one that +// implements the desired TLS certificate validations +func CustomizeTLSTransport(fedCluster *fedv1b1.KubeFedCluster, clientConfig *restclient.Config) error { + clientTransportConfig, err := clientConfig.TransportConfig() + if err != nil { + return errors.Errorf("Cluster %s client transport config error: %s", fedCluster.Name, err) + } + transportConfig, err := transport.TLSConfigFor(clientTransportConfig) + if err != nil { + return errors.Errorf("Cluster %s transport error: %s", fedCluster.Name, err) + } + + err = CustomizeCertificateValidation(fedCluster, transportConfig) + if err != nil { + return errors.Errorf("Cluster %s custom certificate validation error: %s", fedCluster.Name, err) + } + + // using the same defaults as http.DefaultTransport + clientConfig.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: transportConfig, + } + clientConfig.TLSClientConfig = restclient.TLSClientConfig{} + return nil +} + +// CustomizeCertificateValidation modifies an existing tls.Config to disable the +// desired TLS checks in KubeFedCluster config +func CustomizeCertificateValidation(fedCluster *fedv1b1.KubeFedCluster, tlsConfig *tls.Config) error { + // InsecureSkipVerify must be enabled to prevent early validation errors from + // returning before VerifyPeerCertificate is run + tlsConfig.InsecureSkipVerify = true + + var ignoreSubjectName, ignoreValidityPeriod bool + for _, validation := range 
fedCluster.Spec.DisabledTLSValidations { + switch fedv1b1.TLSValidation(validation) { + case fedv1b1.TLSAll: + klog.V(1).Infof("Cluster %s will not perform TLS certificate validation", fedCluster.Name) + return nil + case fedv1b1.TLSSubjectName: + ignoreSubjectName = true + case fedv1b1.TLSValidityPeriod: + ignoreValidityPeriod = true + } + } + + // Normal TLS SubjectName validation uses the conn dnsname for validation, + // but this is not available when using a VerifyPeerCertificate functions. + // As a workaround, we will fill the tls.Config.ServerName with the URL host + // specified as the KubeFedCluster API target + if !ignoreSubjectName && tlsConfig.ServerName == "" { + apiURL, err := url.Parse(fedCluster.Spec.APIEndpoint) + if err != nil { + return errors.Errorf("failed to identify a valid host from APIEndpoint for use in SubjectName validation") + } + tlsConfig.ServerName = apiURL.Hostname() + } + + // VerifyPeerCertificate uses the same logic as crypto/tls Conn.verifyServerCertificate + // but uses a modified set of options to ignore specific validations + tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, verifiedChains [][]*x509.Certificate) error { + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + Intermediates: x509.NewCertPool(), + DNSName: tlsConfig.ServerName, + } + if tlsConfig.Time != nil { + opts.CurrentTime = tlsConfig.Time() + } + + certs := make([]*x509.Certificate, len(certificates)) + for i, asn1Data := range certificates { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return errors.New("tls: failed to parse certificate from server: " + err.Error()) + } + certs[i] = cert + } + + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if ignoreSubjectName { + // set the DNSName to nil to ignore the name validation + opts.DNSName = "" + klog.V(1).Infof("Cluster %s will not perform tls certificate SubjectName validation", 
fedCluster.Name) + } + if ignoreValidityPeriod { + // set the CurrentTime to immediately after the certificate start time + // this will ensure that certificate passes the validity period check + opts.CurrentTime = certs[0].NotBefore.Add(time.Second) + klog.V(1).Infof("Cluster %s will not perform tls certificate ValidityPeriod validation", fedCluster.Name) + } + + _, err := certs[0].Verify(opts) + + return err + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/constants.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/constants.go new file mode 100644 index 000000000..4c2b7f780 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/constants.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "time" +) + +// Providing 0 duration to an informer indicates that resync should be delayed as long as possible +const ( + NoResyncPeriod time.Duration = 0 * time.Second + + NamespaceName = "namespaces" + NamespaceKind = "Namespace" + + ServiceKind = "Service" + + ServiceAccountKind = "ServiceAccount" + + // The following fields are used to interact with unstructured + // resources. 
+ + // Common fields + SpecField = "spec" + StatusField = "status" + MetadataField = "metadata" + + // ServiceAccount fields + SecretsField = "secrets" + + // Scale types + ReplicasField = "replicas" + RetainReplicasField = "retainReplicas" + + // Template fields + TemplateField = "template" + + // Placement fields + PlacementField = "placement" + ClusterSelectorField = "clusterSelector" + MatchLabelsField = "matchLabels" + + // Override fields + OverridesField = "overrides" + ClusterNameField = "clusterName" + ClusterOverridesField = "clusterOverrides" + PathField = "path" + ValueField = "value" + + // Cluster reference + ClustersField = "clusters" + NameField = "name" +) + +type ReconciliationStatus int + +const ( + StatusAllOK ReconciliationStatus = iota + StatusNeedsRecheck + StatusError + StatusNotSynced +) diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/controllerconfig.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/controllerconfig.go new file mode 100644 index 000000000..d0b5207a4 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/controllerconfig.go @@ -0,0 +1,80 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + restclient "k8s.io/client-go/rest" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" +) + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for controller that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration time.Duration + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + RenewDeadline time.Duration + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod time.Duration + // resourceLock indicates the resource object type that will be used to lock + // during leader election cycles. + ResourceLock fedv1b1.ResourceLockType +} + +// KubeFedNamespaces defines the namespace configuration shared by +// most kubefed controllers. +type KubeFedNamespaces struct { + KubeFedNamespace string + TargetNamespace string +} + +// ClusterHealthCheckConfig defines the configurable parameters for cluster health check +type ClusterHealthCheckConfig struct { + Period time.Duration + FailureThreshold int64 + SuccessThreshold int64 + Timeout time.Duration +} + +// ControllerConfig defines the configuration common to KubeFed +// controllers. 
+type ControllerConfig struct { + KubeFedNamespaces + KubeConfig *restclient.Config + ClusterAvailableDelay time.Duration + ClusterUnavailableDelay time.Duration + MinimizeLatency bool + SkipAdoptingResources bool +} + +func (c *ControllerConfig) LimitedScope() bool { + return c.KubeFedNamespaces.TargetNamespace != metav1.NamespaceAll +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/delaying_deliverer.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/delaying_deliverer.go new file mode 100644 index 000000000..e087ad6d9 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/delaying_deliverer.go @@ -0,0 +1,183 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: consider moving it to a more generic package. +package util + +import ( + "container/heap" + "time" +) + +const ( + // TODO: Investigate what capacity is right. + delayingDelivererUpdateChanCapacity = 1000 +) + +// DelayingDelivererItem is structure delivered by DelayingDeliverer to the +// target channel. +type DelayingDelivererItem struct { + // Key under which the value was added to deliverer. + Key string + // Value of the item. + Value interface{} + // When the item should be delivered. + DeliveryTime time.Time +} + +type delivererHeap struct { + keyPosition map[string]int + data []*DelayingDelivererItem +} + +// Functions required by container.Heap. 
+ +func (dh *delivererHeap) Len() int { return len(dh.data) } +func (dh *delivererHeap) Less(i, j int) bool { + return dh.data[i].DeliveryTime.Before(dh.data[j].DeliveryTime) +} +func (dh *delivererHeap) Swap(i, j int) { + dh.keyPosition[dh.data[i].Key] = j + dh.keyPosition[dh.data[j].Key] = i + dh.data[i], dh.data[j] = dh.data[j], dh.data[i] +} + +func (dh *delivererHeap) Push(x interface{}) { + item := x.(*DelayingDelivererItem) + dh.data = append(dh.data, item) + dh.keyPosition[item.Key] = len(dh.data) - 1 +} + +func (dh *delivererHeap) Pop() interface{} { + n := len(dh.data) + item := dh.data[n-1] + dh.data = dh.data[:n-1] + delete(dh.keyPosition, item.Key) + return item +} + +// A structure that pushes the items to the target channel at a given time. +type DelayingDeliverer struct { + // Channel to deliver the data when their time comes. + targetChannel chan *DelayingDelivererItem + // Store for data + heap *delivererHeap + // Channel to feed the main goroutine with updates. + updateChannel chan *DelayingDelivererItem + // To stop the main goroutine. + stopChannel chan struct{} +} + +func NewDelayingDeliverer() *DelayingDeliverer { + return NewDelayingDelivererWithChannel(make(chan *DelayingDelivererItem, 100)) +} + +func NewDelayingDelivererWithChannel(targetChannel chan *DelayingDelivererItem) *DelayingDeliverer { + return &DelayingDeliverer{ + targetChannel: targetChannel, + heap: &delivererHeap{ + keyPosition: make(map[string]int), + data: make([]*DelayingDelivererItem, 0), + }, + updateChannel: make(chan *DelayingDelivererItem, delayingDelivererUpdateChanCapacity), + stopChannel: make(chan struct{}), + } +} + +// Deliver all items due before or equal to timestamp. 
+func (d *DelayingDeliverer) deliver(timestamp time.Time) { + for d.heap.Len() > 0 { + if timestamp.Before(d.heap.data[0].DeliveryTime) { + return + } + item := heap.Pop(d.heap).(*DelayingDelivererItem) + d.targetChannel <- item + } +} + +func (d *DelayingDeliverer) run() { + for { + now := time.Now() + d.deliver(now) + + nextWakeUp := now.Add(time.Hour) + if d.heap.Len() > 0 { + nextWakeUp = d.heap.data[0].DeliveryTime + } + sleepTime := nextWakeUp.Sub(now) + + select { + case <-time.After(sleepTime): + break // just wake up and process the data + case item := <-d.updateChannel: + if position, found := d.heap.keyPosition[item.Key]; found { + if item.DeliveryTime.Before(d.heap.data[position].DeliveryTime) { + d.heap.data[position] = item + heap.Fix(d.heap, position) + } + // Ignore if later. + } else { + heap.Push(d.heap, item) + } + case <-d.stopChannel: + return + } + } +} + +// Starts the DelayingDeliverer. +func (d *DelayingDeliverer) Start() { + go d.run() +} + +// Stops the DelayingDeliverer. Undelivered items are discarded. +func (d *DelayingDeliverer) Stop() { + close(d.stopChannel) +} + +// Delivers value at the given time. +func (d *DelayingDeliverer) DeliverAt(key string, value interface{}, deliveryTime time.Time) { + d.updateChannel <- &DelayingDelivererItem{ + Key: key, + Value: value, + DeliveryTime: deliveryTime, + } +} + +// Delivers value after the given delay. +func (d *DelayingDeliverer) DeliverAfter(key string, value interface{}, delay time.Duration) { + d.DeliverAt(key, value, time.Now().Add(delay)) +} + +// Gets target channel of the deliverer. +func (d *DelayingDeliverer) GetTargetChannel() chan *DelayingDelivererItem { + return d.targetChannel +} + +// Starts Delaying deliverer with a handler listening on the target channel. 
+func (d *DelayingDeliverer) StartWithHandler(handler func(*DelayingDelivererItem)) { + go func() { + for { + select { + case item := <-d.targetChannel: + handler(item) + case <-d.stopChannel: + return + } + } + }() + d.Start() +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federated_informer.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federated_informer.go new file mode 100644 index 000000000..f268c3ad6 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federated_informer.go @@ -0,0 +1,570 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "reflect" + "sync" + "time" + + "github.com/pkg/errors" + + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + + fedcommon "sigs.k8s.io/kubefed/pkg/apis/core/common" + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + "sigs.k8s.io/kubefed/pkg/client/generic" +) + +const ( + clusterSyncPeriod = 10 * time.Minute + userAgentName = "kubefed-controller" +) + +// An object with an origin information. +type FederatedObject struct { + Object interface{} + ClusterName string +} + +// FederatedReadOnlyStore is an overlay over multiple stores created in federated clusters. +type FederatedReadOnlyStore interface { + // Returns all items in the store. 
+ List() ([]FederatedObject, error) + + // Returns all items from a cluster. + ListFromCluster(clusterName string) ([]interface{}, error) + + // GetKeyFor returns the key under which the item would be put in the store. + GetKeyFor(item interface{}) string + + // GetByKey returns the item stored under the given key in the specified cluster (if exist). + GetByKey(clusterName string, key string) (interface{}, bool, error) + + // Returns the items stored under the given key in all clusters. + GetFromAllClusters(key string) ([]FederatedObject, error) + + // Checks whether stores for all clusters form the lists (and only these) are there and + // are synced. This is only a basic check whether the data inside of the store is usable. + // It is not a full synchronization/locking mechanism it only tries to ensure that out-of-sync + // issues occur less often. All users of the interface should assume + // that there may be significant delays in content updates of all kinds and write their + // code that it doesn't break if something is slightly out-of-sync. + ClustersSynced(clusters []*fedv1b1.KubeFedCluster) bool +} + +// An interface to retrieve both KubeFedCluster resources and clients +// to access the clusters they represent. +type RegisteredClustersView interface { + // GetClientForCluster returns a client for the cluster, if present. + GetClientForCluster(clusterName string) (generic.Client, error) + + // GetUnreadyClusters returns a list of all clusters that are not ready yet. + GetUnreadyClusters() ([]*fedv1b1.KubeFedCluster, error) + + // GetReadyClusters returns all clusters for which the sub-informers are run. + GetReadyClusters() ([]*fedv1b1.KubeFedCluster, error) + + // GetClusters returns a list of all clusters. + GetClusters() ([]*fedv1b1.KubeFedCluster, error) + + // GetReadyCluster returns the cluster with the given name, if found. 
+ GetReadyCluster(name string) (*fedv1b1.KubeFedCluster, bool, error) + + // ClustersSynced returns true if the view is synced (for the first time). + ClustersSynced() bool +} + +// FederatedInformer provides access to clusters registered with a +// KubeFed control plane and watches a given resource type in +// registered clusters. +// +// Whenever a new cluster is registered with KubeFed, an informer is +// created for it using TargetInformerFactory. Informers are stopped +// when a cluster is either put offline of deleted. It is assumed that +// some controller keeps an eye on the cluster list and thus the +// clusters in ETCD are up to date. +type FederatedInformer interface { + RegisteredClustersView + + // Returns a store created over all stores from target informers. + GetTargetStore() FederatedReadOnlyStore + + // Starts all the processes. + Start() + + // Stops all the processes inside the informer. + Stop() +} + +// A function that should be used to create an informer on the target object. Store should use +// cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function. +type TargetInformerFactory func(*fedv1b1.KubeFedCluster, *restclient.Config) (cache.Store, cache.Controller, error) + +// A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired) +// when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired) +// when it is either deleted or becomes not ready. When cluster spec (IP)is modified both ClusterAvailable +// and ClusterUnavailable are fired. +type ClusterLifecycleHandlerFuncs struct { + // Fired when the cluster becomes available. + ClusterAvailable func(*fedv1b1.KubeFedCluster) + // Fired when the cluster becomes unavailable. The second arg contains data that was present + // in the cluster before deletion. + ClusterUnavailable func(*fedv1b1.KubeFedCluster, []interface{}) +} + +// Builds a FederatedInformer for the given configuration. 
+func NewFederatedInformer( + config *ControllerConfig, + client generic.Client, + apiResource *metav1.APIResource, + triggerFunc func(pkgruntime.Object), + clusterLifecycle *ClusterLifecycleHandlerFuncs) (FederatedInformer, error) { + + targetInformerFactory := func(cluster *fedv1b1.KubeFedCluster, clusterConfig *restclient.Config) (cache.Store, cache.Controller, error) { + resourceClient, err := NewResourceClient(clusterConfig, apiResource) + if err != nil { + return nil, nil, err + } + targetNamespace := NamespaceForCluster(cluster.Name, config.TargetNamespace) + store, controller := NewManagedResourceInformer(resourceClient, targetNamespace, apiResource, triggerFunc) + return store, controller, nil + } + + federatedInformer := &federatedInformerImpl{ + targetInformerFactory: targetInformerFactory, + configFactory: func(cluster *fedv1b1.KubeFedCluster) (*restclient.Config, error) { + clusterConfig, err := BuildClusterConfig(cluster, client, config.KubeFedNamespace) + if err != nil { + return nil, err + } + if clusterConfig == nil { + return nil, errors.Errorf("Unable to load configuration for cluster %q", cluster.Name) + } + restclient.AddUserAgent(clusterConfig, userAgentName) + return clusterConfig, nil + }, + targetInformers: make(map[string]informer), + fedNamespace: config.KubeFedNamespace, + clusterClients: make(map[string]generic.Client), + } + + getClusterData := func(name string) []interface{} { + data, err := federatedInformer.GetTargetStore().ListFromCluster(name) + if err != nil { + klog.Errorf("Failed to list %s content: %v", name, err) + return make([]interface{}, 0) + } + return data + } + + var err error + federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller, err = NewGenericInformerWithEventHandler( + config.KubeConfig, + config.KubeFedNamespace, + &fedv1b1.KubeFedCluster{}, + clusterSyncPeriod, + &cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(old interface{}) { + oldCluster, ok := 
old.(*fedv1b1.KubeFedCluster) + if ok { + var data []interface{} + if clusterLifecycle.ClusterUnavailable != nil { + data = getClusterData(oldCluster.Name) + } + federatedInformer.deleteCluster(oldCluster) + if clusterLifecycle.ClusterUnavailable != nil { + clusterLifecycle.ClusterUnavailable(oldCluster, data) + } + } + }, + AddFunc: func(cur interface{}) { + curCluster, ok := cur.(*fedv1b1.KubeFedCluster) + if !ok { + klog.Errorf("Cluster %v/%v not added; incorrect type", curCluster.Namespace, curCluster.Name) + } else if IsClusterReady(&curCluster.Status) { + federatedInformer.addCluster(curCluster) + klog.Infof("Cluster %v/%v is ready", curCluster.Namespace, curCluster.Name) + if clusterLifecycle.ClusterAvailable != nil { + clusterLifecycle.ClusterAvailable(curCluster) + } + } else { + klog.Infof("Cluster %v/%v not added; it is not ready.", curCluster.Namespace, curCluster.Name) + } + }, + UpdateFunc: func(old, cur interface{}) { + oldCluster, ok := old.(*fedv1b1.KubeFedCluster) + if !ok { + klog.Errorf("Internal error: Cluster %v not updated. Old cluster not of correct type.", old) + return + } + curCluster, ok := cur.(*fedv1b1.KubeFedCluster) + if !ok { + klog.Errorf("Internal error: Cluster %v not updated. 
New cluster not of correct type.", cur) + return + } + if IsClusterReady(&oldCluster.Status) != IsClusterReady(&curCluster.Status) || !reflect.DeepEqual(oldCluster.Spec, curCluster.Spec) || !reflect.DeepEqual(oldCluster.ObjectMeta.Labels, curCluster.ObjectMeta.Labels) || !reflect.DeepEqual(oldCluster.ObjectMeta.Annotations, curCluster.ObjectMeta.Annotations) { + var data []interface{} + if clusterLifecycle.ClusterUnavailable != nil { + data = getClusterData(oldCluster.Name) + } + federatedInformer.deleteCluster(oldCluster) + if clusterLifecycle.ClusterUnavailable != nil { + clusterLifecycle.ClusterUnavailable(oldCluster, data) + } + + if IsClusterReady(&curCluster.Status) { + federatedInformer.addCluster(curCluster) + if clusterLifecycle.ClusterAvailable != nil { + clusterLifecycle.ClusterAvailable(curCluster) + } + } + } else { + klog.V(7).Infof("Cluster %v not updated to %v as ready status and specs are identical", oldCluster, curCluster) + } + }, + }, + ) + return federatedInformer, err +} + +func IsClusterReady(clusterStatus *fedv1b1.KubeFedClusterStatus) bool { + for _, condition := range clusterStatus.Conditions { + if condition.Type == fedcommon.ClusterReady { + if condition.Status == apiv1.ConditionTrue { + return true + } + } + } + return false +} + +type informer struct { + controller cache.Controller + store cache.Store + stopChan chan struct{} +} + +type federatedInformerImpl struct { + sync.Mutex + + // Informer on federated clusters. + clusterInformer informer + + // Target informers factory + targetInformerFactory TargetInformerFactory + + // Structures returned by targetInformerFactory + targetInformers map[string]informer + + // Retrieves configuration to access a cluster. 
+ configFactory func(*fedv1b1.KubeFedCluster) (*restclient.Config, error) + + // Caches cluster clients (reduces client discovery and secret retrieval) + clusterClients map[string]generic.Client + + // Namespace from which to source KubeFedCluster resources + fedNamespace string +} + +// *federatedInformerImpl implements FederatedInformer interface. +var _ FederatedInformer = &federatedInformerImpl{} + +type federatedStoreImpl struct { + federatedInformer *federatedInformerImpl +} + +func (f *federatedInformerImpl) Stop() { + klog.V(4).Infof("Stopping federated informer.") + f.Lock() + defer f.Unlock() + + klog.V(4).Infof("... Closing cluster informer channel.") + close(f.clusterInformer.stopChan) + for key, informer := range f.targetInformers { + klog.V(4).Infof("... Closing informer channel for %q.", key) + close(informer.stopChan) + // Remove each informer after it has been stopped to prevent + // subsequent cluster deletion from attempting to double close + // an informer's stop channel. + delete(f.targetInformers, key) + } +} + +func (f *federatedInformerImpl) Start() { + f.Lock() + defer f.Unlock() + + f.clusterInformer.stopChan = make(chan struct{}) + go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) +} + +// GetClientForCluster returns a client for the cluster, if present. 
+func (f *federatedInformerImpl) GetClientForCluster(clusterName string) (generic.Client, error) { + f.Lock() + defer f.Unlock() + + // return cached client if one exists (to prevent frequent secret retrieval and rest discovery) + if client, ok := f.clusterClients[clusterName]; ok { + return client, nil + } + config, err := f.getConfigForClusterUnlocked(clusterName) + if err != nil { + return nil, errors.Wrap(err, "Client creation failed") + } + client, err := generic.New(config) + if err != nil { + return client, err + } + f.clusterClients[clusterName] = client + return client, nil +} + +func (f *federatedInformerImpl) getConfigForClusterUnlocked(clusterName string) (*restclient.Config, error) { + // No locking needed. Will happen in f.GetCluster. + klog.V(4).Infof("Getting config for cluster %q", clusterName) + if cluster, found, err := f.getReadyClusterUnlocked(clusterName); found && err == nil { + return f.configFactory(cluster) + } else { + if err != nil { + return nil, err + } + } + return nil, errors.Errorf("cluster %q not found", clusterName) +} + +func (f *federatedInformerImpl) GetUnreadyClusters() ([]*fedv1b1.KubeFedCluster, error) { + f.Lock() + defer f.Unlock() + + items := f.clusterInformer.store.List() + result := make([]*fedv1b1.KubeFedCluster, 0, len(items)) + for _, item := range items { + if cluster, ok := item.(*fedv1b1.KubeFedCluster); ok { + if !IsClusterReady(&cluster.Status) { + result = append(result, cluster) + } + } else { + return nil, errors.Errorf("wrong data in FederatedInformerImpl cluster store: %v", item) + } + } + return result, nil +} + +// GetReadyClusters returns all clusters for which the sub-informers are run. +func (f *federatedInformerImpl) GetReadyClusters() ([]*fedv1b1.KubeFedCluster, error) { + return f.getClusters(true) +} + +// GetClusters returns all clusters regardless of ready state. 
+func (f *federatedInformerImpl) GetClusters() ([]*fedv1b1.KubeFedCluster, error) { + return f.getClusters(false) +} + +// GetReadyClusters returns only ready clusters if onlyReady is true and all clusters otherwise. +func (f *federatedInformerImpl) getClusters(onlyReady bool) ([]*fedv1b1.KubeFedCluster, error) { + f.Lock() + defer f.Unlock() + + items := f.clusterInformer.store.List() + result := make([]*fedv1b1.KubeFedCluster, 0, len(items)) + for _, item := range items { + if cluster, ok := item.(*fedv1b1.KubeFedCluster); ok { + if !onlyReady || IsClusterReady(&cluster.Status) { + result = append(result, cluster) + } + } else { + return nil, errors.Errorf("wrong data in FederatedInformerImpl cluster store: %v", item) + } + } + return result, nil +} + +// GetCluster returns the cluster with the given name, if found. +func (f *federatedInformerImpl) GetReadyCluster(name string) (*fedv1b1.KubeFedCluster, bool, error) { + f.Lock() + defer f.Unlock() + return f.getReadyClusterUnlocked(name) +} + +func (f *federatedInformerImpl) getReadyClusterUnlocked(name string) (*fedv1b1.KubeFedCluster, bool, error) { + key := fmt.Sprintf("%s/%s", f.fedNamespace, name) + if obj, exist, err := f.clusterInformer.store.GetByKey(key); exist && err == nil { + if cluster, ok := obj.(*fedv1b1.KubeFedCluster); ok { + if IsClusterReady(&cluster.Status) { + return cluster, true, nil + } + return nil, false, nil + + } + return nil, false, errors.Errorf("wrong data in FederatedInformerImpl cluster store: %v", obj) + + } else { + return nil, false, err + } +} + +// Synced returns true if the view is synced (for the first time) +func (f *federatedInformerImpl) ClustersSynced() bool { + return f.clusterInformer.controller.HasSynced() +} + +// Adds the given cluster to federated informer. 
+func (f *federatedInformerImpl) addCluster(cluster *fedv1b1.KubeFedCluster) { + f.Lock() + defer f.Unlock() + name := cluster.Name + if config, err := f.getConfigForClusterUnlocked(name); err == nil { + store, controller, err := f.targetInformerFactory(cluster, config) + if err != nil { + // TODO: create also an event for cluster. + klog.Errorf("Failed to create an informer for cluster %q: %v", cluster.Name, err) + return + } + targetInformer := informer{ + controller: controller, + store: store, + stopChan: make(chan struct{}), + } + f.targetInformers[name] = targetInformer + go targetInformer.controller.Run(targetInformer.stopChan) + } else { + // TODO: create also an event for cluster. + klog.Errorf("Failed to create a client for cluster: %v", err) + } +} + +// Removes the cluster from federated informer. +func (f *federatedInformerImpl) deleteCluster(cluster *fedv1b1.KubeFedCluster) { + f.Lock() + defer f.Unlock() + name := cluster.Name + if targetInformer, found := f.targetInformers[name]; found { + close(targetInformer.stopChan) + } + delete(f.targetInformers, name) + delete(f.clusterClients, name) +} + +// Returns a store created over all stores from target informers. +func (f *federatedInformerImpl) GetTargetStore() FederatedReadOnlyStore { + return &federatedStoreImpl{ + federatedInformer: f, + } +} + +// Returns all items in the store. +func (fs *federatedStoreImpl) List() ([]FederatedObject, error) { + fs.federatedInformer.Lock() + defer fs.federatedInformer.Unlock() + + result := make([]FederatedObject, 0) + for clusterName, targetInformer := range fs.federatedInformer.targetInformers { + for _, value := range targetInformer.store.List() { + result = append(result, FederatedObject{ClusterName: clusterName, Object: value}) + } + } + return result, nil +} + +// Returns all items in the given cluster. 
+func (fs *federatedStoreImpl) ListFromCluster(clusterName string) ([]interface{}, error) { + fs.federatedInformer.Lock() + defer fs.federatedInformer.Unlock() + + result := make([]interface{}, 0) + if targetInformer, found := fs.federatedInformer.targetInformers[clusterName]; found { + values := targetInformer.store.List() + result = append(result, values...) + } + return result, nil +} + +// GetByKey returns the item stored under the given key in the specified cluster (if exist). +func (fs *federatedStoreImpl) GetByKey(clusterName string, key string) (interface{}, bool, error) { + fs.federatedInformer.Lock() + defer fs.federatedInformer.Unlock() + if targetInformer, found := fs.federatedInformer.targetInformers[clusterName]; found { + return targetInformer.store.GetByKey(key) + } + return nil, false, nil +} + +// Returns the items stored under the given key in all clusters. +func (fs *federatedStoreImpl) GetFromAllClusters(key string) ([]FederatedObject, error) { + fs.federatedInformer.Lock() + defer fs.federatedInformer.Unlock() + + result := make([]FederatedObject, 0) + for clusterName, targetInformer := range fs.federatedInformer.targetInformers { + value, exist, err := targetInformer.store.GetByKey(key) + if err != nil { + return nil, err + } + if exist { + result = append(result, FederatedObject{ClusterName: clusterName, Object: value}) + } + } + return result, nil +} + +// GetKeyFor returns the key under which the item would be put in the store. +func (fs *federatedStoreImpl) GetKeyFor(item interface{}) string { + // TODO: support other keying functions. + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(item) + return key +} + +// Checks whether stores for all clusters form the lists (and only these) are there and +// are synced. +func (fs *federatedStoreImpl) ClustersSynced(clusters []*fedv1b1.KubeFedCluster) bool { + + // Get the list of informers to check under a lock and check it outside. 
+ okSoFar, informersToCheck := func() (bool, []informer) { + fs.federatedInformer.Lock() + defer fs.federatedInformer.Unlock() + + if len(fs.federatedInformer.targetInformers) != len(clusters) { + return false, []informer{} + } + informersToCheck := make([]informer, 0, len(clusters)) + for _, cluster := range clusters { + if targetInformer, found := fs.federatedInformer.targetInformers[cluster.Name]; found { + informersToCheck = append(informersToCheck, targetInformer) + } else { + return false, []informer{} + } + } + return true, informersToCheck + }() + + if !okSoFar { + return false + } + for _, informerToCheck := range informersToCheck { + if !informerToCheck.controller.HasSynced() { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federatedstatus.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federatedstatus.go new file mode 100644 index 000000000..4d22d084f --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/federatedstatus.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FederatedResource is a generic representation of a federated type +type FederatedResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + ClusterStatus []ResourceClusterStatus `json:"clusterStatus,omitempty"` +} + +// ResourceClusterStatus defines the status of federated resource within a cluster +type ResourceClusterStatus struct { + ClusterName string `json:"clusterName,omitempty"` + Status map[string]interface{} `json:"status,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/genericinformer.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/genericinformer.go new file mode 100644 index 000000000..3bdfe9aaf --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/genericinformer.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "time" + + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "sigs.k8s.io/kubefed/pkg/client/generic/scheme" +) + +func NewGenericInformer(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller, error) { + return NewGenericInformerWithEventHandler(config, namespace, obj, resyncPeriod, NewTriggerOnAllChanges(triggerFunc)) +} + +func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, resourceEventHandlerFuncs *cache.ResourceEventHandlerFuncs) (cache.Store, cache.Controller, error) { + gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) + if err != nil { + return nil, nil, err + } + + mapper, err := apiutil.NewDiscoveryRESTMapper(config) + if err != nil { + return nil, nil, errors.Wrap(err, "Could not create RESTMapper from config") + } + + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, nil, err + } + + client, err := apiutil.RESTClientForGVK(gvk, config, scheme.Codecs) + if err != nil { + return nil, nil, err + } + + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := scheme.Scheme.New(listGVK) + if err != nil { + return nil, nil, err + } + + store, controller := cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (pkgruntime.Object, error) { + res := listObj.DeepCopyObject() + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, 
scheme.ParameterCodec).Do().Into(res) + return res, err + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Watch needs to be set to true separately + opts.Watch = true + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, scheme.ParameterCodec).Watch() + }, + }, + obj, + resyncPeriod, + resourceEventHandlerFuncs, + ) + return store, controller, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/handlers.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/handlers.go new file mode 100644 index 000000000..38d4b669a --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/handlers.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "reflect" + + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +// Returns cache.ResourceEventHandlerFuncs that trigger the given function +// on all object changes. +func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs { + return &cache.ResourceEventHandlerFuncs{ + DeleteFunc: func(old interface{}) { + if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok { + // This object might be stale but ok for our current usage. 
+ old = deleted.Obj + if old == nil { + return + } + } + oldObj := old.(pkgruntime.Object) + triggerFunc(oldObj) + }, + AddFunc: func(cur interface{}) { + curObj := cur.(pkgruntime.Object) + triggerFunc(curObj) + }, + UpdateFunc: func(old, cur interface{}) { + curObj := cur.(pkgruntime.Object) + if !reflect.DeepEqual(old, cur) { + triggerFunc(curObj) + } + }, + } +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/managedlabel.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/managedlabel.go new file mode 100644 index 000000000..5f678561e --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/managedlabel.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +const ( + ManagedByKubeFedLabelKey = "kubefed.io/managed" + ManagedByKubeFedLabelValue = "true" + UnmanagedByKubeFedLabelValue = "false" +) + +// HasManagedLabel indicates whether the given object has the managed +// label. +func HasManagedLabel(obj *unstructured.Unstructured) bool { + labels := obj.GetLabels() + if labels == nil { + return false + } + return labels[ManagedByKubeFedLabelKey] == ManagedByKubeFedLabelValue +} + +// IsExplicitlyUnmanaged indicates whether the given object has the managed +// label with value false. 
+func IsExplicitlyUnmanaged(obj *unstructured.Unstructured) bool { + labels := obj.GetLabels() + if labels == nil { + return false + } + return labels[ManagedByKubeFedLabelKey] == UnmanagedByKubeFedLabelValue +} + +// AddManagedLabel ensures that the given object has the managed +// label. +func AddManagedLabel(obj *unstructured.Unstructured) { + labels := obj.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels[ManagedByKubeFedLabelKey] = ManagedByKubeFedLabelValue + obj.SetLabels(labels) +} + +// RemoveManagedLabel ensures that the given object does not have the +// managed label. +func RemoveManagedLabel(obj *unstructured.Unstructured) { + labels := obj.GetLabels() + if labels == nil || labels[ManagedByKubeFedLabelKey] != ManagedByKubeFedLabelValue { + return + } + delete(labels, ManagedByKubeFedLabelKey) + obj.SetLabels(labels) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/meta.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/meta.go new file mode 100644 index 000000000..d8e58aa49 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/meta.go @@ -0,0 +1,139 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "encoding/json" + "reflect" + + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + pkgruntime "k8s.io/apimachinery/pkg/runtime" +) + +// Copies cluster-independent, user provided data from the given ObjectMeta struct. If in +// the future the ObjectMeta structure is expanded then any field that is not populated +// by the api server should be included here. +func copyObjectMeta(obj metav1.ObjectMeta) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: obj.Name, + Namespace: obj.Namespace, + Labels: obj.Labels, + Annotations: obj.Annotations, + ResourceVersion: obj.ResourceVersion, + } +} + +// Deep copies cluster-independent, user provided data from the given ObjectMeta struct. If in +// the future the ObjectMeta structure is expanded then any field that is not populated +// by the api server should be included here. +func DeepCopyRelevantObjectMeta(obj metav1.ObjectMeta) metav1.ObjectMeta { + copyMeta := copyObjectMeta(obj) + if obj.Labels != nil { + copyMeta.Labels = make(map[string]string) + for key, val := range obj.Labels { + copyMeta.Labels[key] = val + } + } + if obj.Annotations != nil { + copyMeta.Annotations = make(map[string]string) + for key, val := range obj.Annotations { + copyMeta.Annotations[key] = val + } + } + return copyMeta +} + +// Checks if cluster-independent, user provided data in two given ObjectMeta are equal. If in +// the future the ObjectMeta structure is expanded then any field that is not populated +// by the api server should be included here. 
+func ObjectMetaEquivalent(a, b metav1.ObjectMeta) bool { + if a.Name != b.Name { + return false + } + if a.Namespace != b.Namespace { + return false + } + if !reflect.DeepEqual(a.Labels, b.Labels) && (len(a.Labels) != 0 || len(b.Labels) != 0) { + return false + } + if !reflect.DeepEqual(a.Annotations, b.Annotations) && (len(a.Annotations) != 0 || len(b.Annotations) != 0) { + return false + } + return true +} + +// Checks if cluster-independent, user provided data in two given ObjectMeta are equal. If in +// the future the ObjectMeta structure is expanded then any field that is not populated +// by the api server should be included here. +func ObjectMetaObjEquivalent(a, b metav1.Object) bool { + if a.GetName() != b.GetName() { + return false + } + if a.GetNamespace() != b.GetNamespace() { + return false + } + aLabels := a.GetLabels() + bLabels := b.GetLabels() + if !reflect.DeepEqual(aLabels, bLabels) && (len(aLabels) != 0 || len(bLabels) != 0) { + return false + } + aAnnotations := a.GetAnnotations() + bAnnotations := b.GetAnnotations() + if !reflect.DeepEqual(aAnnotations, bAnnotations) && (len(aAnnotations) != 0 || len(bAnnotations) != 0) { + return false + } + return true +} + +// Checks if cluster-independent, user provided data in ObjectMeta and Spec in two given top +// level api objects are equivalent. +func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool { + objectMetaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta) + objectMetaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta) + specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface() + specB := reflect.ValueOf(b).Elem().FieldByName("Spec").Interface() + return ObjectMetaEquivalent(objectMetaA, objectMetaB) && reflect.DeepEqual(specA, specB) +} + +func MetaAccessor(obj pkgruntime.Object) metav1.Object { + accessor, err := meta.Accessor(obj) + if err != nil { + // This should always succeed if obj is not nil. 
Also, + // adapters are slated for replacement by unstructured. + return nil + } + return accessor +} + +// GetUnstructured return Unstructured for any given kubernetes type +func GetUnstructured(resource interface{}) (*unstructured.Unstructured, error) { + content, err := json.Marshal(resource) + if err != nil { + return nil, errors.Wrap(err, "Failed to JSON Marshal") + } + unstructuredResource := &unstructured.Unstructured{} + err = unstructuredResource.UnmarshalJSON(content) + if err != nil { + return nil, errors.Wrap(err, "Failed to UnmarshalJSON into unstructured content") + } + return unstructuredResource, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/naming.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/naming.go new file mode 100644 index 000000000..436a7c418 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/naming.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// The functions in this file are exposed as variables to allow them +// to be overridden for testing purposes. Simulated scale testing +// requires being able to change the namespace of target resources +// (NamespaceForCluster and QualifiedNameForCluster) and ensure that +// the namespace of a federated resource will always be the kubefed +// system namespace (NamespaceForResource). 
+ +func namespaceForCluster(clusterName, namespace string) string { + return namespace +} + +// NamespaceForCluster returns the namespace to use for the given cluster. +var NamespaceForCluster = namespaceForCluster + +func namespaceForResource(resourceNamespace, fedNamespace string) string { + return resourceNamespace +} + +// NamespaceForResource returns either the kubefed namespace or +// resource namespace. +var NamespaceForResource = namespaceForResource + +func qualifiedNameForCluster(clusterName string, qualifiedName QualifiedName) QualifiedName { + return qualifiedName +} + +// QualifiedNameForCluster returns the qualified name to use for the +// given cluster. +var QualifiedNameForCluster = qualifiedNameForCluster diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/orphaninganotation.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/orphaninganotation.go new file mode 100644 index 000000000..faba671c8 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/orphaninganotation.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + +const ( + // If this annotation is present on a federated resource, resources in the + // member clusters managed by the federated resource should be orphaned. 
+ // If the annotation is not present (the default), resources in member + // clusters will be deleted before the federated resource is deleted. + OrphanManagedResourcesAnnotation = "kubefed.io/orphan" + OrphanedManagedResourcesValue = "true" +) + +// IsOrphaningEnabled checks status of "orphaning enable" (OrphanManagedResources: OrphanedManagedResourceslValue') +// annotation on a resource. +func IsOrphaningEnabled(obj *unstructured.Unstructured) bool { + annotations := obj.GetAnnotations() + if annotations == nil { + return false + } + return annotations[OrphanManagedResourcesAnnotation] == OrphanedManagedResourcesValue +} + +// Enables the orphaning mode +func EnableOrphaning(obj *unstructured.Unstructured) { + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[OrphanManagedResourcesAnnotation] = OrphanedManagedResourcesValue + obj.SetAnnotations(annotations) +} + +// Disables the orphaning mode +func DisableOrphaning(obj *unstructured.Unstructured) { + annotations := obj.GetAnnotations() + if annotations == nil { + return + } + delete(annotations, OrphanManagedResourcesAnnotation) + obj.SetAnnotations(annotations) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/overrides.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/overrides.go new file mode 100644 index 000000000..28378ffdb --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/overrides.go @@ -0,0 +1,192 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "encoding/json" + + "github.com/evanphx/json-patch" + "github.com/pkg/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" +) + +type ClusterOverride struct { + Op string `json:"op,omitempty"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +type GenericOverrideItem struct { + ClusterName string `json:"clusterName"` + ClusterOverrides []ClusterOverride `json:"clusterOverrides,omitempty"` +} + +type GenericOverrideSpec struct { + Overrides []GenericOverrideItem `json:"overrides,omitempty"` +} + +type GenericOverride struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec *GenericOverrideSpec `json:"spec,omitempty"` +} + +// Namespace and name may not be overridden since these fields are the +// primary mechanism of association between a federated resource in +// the host cluster and the target resources in the member clusters. +// +// Kind should always be sourced from the FTC and not vary across +// member clusters. +// +// apiVersion can be overridden to support managing resources like +// Ingress which can exist in different groups at different +// versions. Users will need to take care not to abuse this +// capability. +var invalidPaths = sets.NewString( + "/metadata/namespace", + "/metadata/name", + "/metadata/generateName", + "/kind", +) + +// Slice of ClusterOverride +type ClusterOverrides []ClusterOverride + +// Mapping of clusterName to overrides for the cluster +type OverridesMap map[string]ClusterOverrides + +// ToUnstructuredSlice converts the map of overrides to a slice of +// interfaces that can be set in an unstructured object. 
+func (m OverridesMap) ToUnstructuredSlice() []interface{} { + overrides := []interface{}{} + for clusterName, clusterOverrides := range m { + overridesItem := map[string]interface{}{ + ClusterNameField: clusterName, + ClusterOverridesField: clusterOverrides, + } + overrides = append(overrides, overridesItem) + } + return overrides +} + +// GetOverrides returns a map of overrides populated from the given +// unstructured object. +func GetOverrides(rawObj *unstructured.Unstructured) (OverridesMap, error) { + overridesMap := make(OverridesMap) + + if rawObj == nil { + return overridesMap, nil + } + + genericFedObject := GenericOverride{} + err := UnstructuredToInterface(rawObj, &genericFedObject) + if err != nil { + return nil, err + } + + if genericFedObject.Spec == nil || genericFedObject.Spec.Overrides == nil { + // No overrides defined for the federated type + return overridesMap, nil + } + + for _, overrideItem := range genericFedObject.Spec.Overrides { + clusterName := overrideItem.ClusterName + if _, ok := overridesMap[clusterName]; ok { + return nil, errors.Errorf("cluster %q appears more than once", clusterName) + } + + clusterOverrides := overrideItem.ClusterOverrides + + paths := sets.NewString() + for i, clusterOverride := range clusterOverrides { + path := clusterOverride.Path + if invalidPaths.Has(path) { + return nil, errors.Errorf("override[%d] for cluster %q has an invalid path: %s", i, clusterName, path) + } + if paths.Has(path) { + return nil, errors.Errorf("path %q appears more than once for cluster %q", path, clusterName) + } + paths.Insert(path) + } + overridesMap[clusterName] = clusterOverrides + } + + return overridesMap, nil +} + +// SetOverrides sets the spec.overrides field of the unstructured +// object from the provided overrides map. 
+func SetOverrides(fedObject *unstructured.Unstructured, overridesMap OverridesMap) error { + rawSpec := fedObject.Object[SpecField] + if rawSpec == nil { + rawSpec = map[string]interface{}{} + fedObject.Object[SpecField] = rawSpec + } + + spec, ok := rawSpec.(map[string]interface{}) + if !ok { + return errors.Errorf("Unable to set overrides since %q is not an object: %T", SpecField, rawSpec) + } + spec[OverridesField] = overridesMap.ToUnstructuredSlice() + return nil +} + +// UnstructuredToInterface converts an unstructured object to the +// provided interface by json marshalling/unmarshalling. +func UnstructuredToInterface(rawObj *unstructured.Unstructured, obj interface{}) error { + content, err := rawObj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(content, obj) +} + +// ApplyJsonPatch applies the override on to the given unstructured object. +func ApplyJsonPatch(obj *unstructured.Unstructured, overrides ClusterOverrides) error { + // TODO: Do the defaulting of "op" field to "replace" in API defaulting + for i, overrideItem := range overrides { + if overrideItem.Op == "" { + overrides[i].Op = "replace" + } + } + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return err + } + + ObjectJSONBytes, err := obj.MarshalJSON() + if err != nil { + return err + } + + patchedObjectJSONBytes, err := patch.Apply(ObjectJSONBytes) + if err != nil { + return err + } + + err = obj.UnmarshalJSON(patchedObjectJSONBytes) + return err +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/placement.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/placement.go new file mode 100644 index 000000000..662652667 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/placement.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" +) + +type GenericClusterReference struct { + Name string `json:"name"` +} + +type GenericPlacementFields struct { + Clusters []GenericClusterReference `json:"clusters,omitempty"` + ClusterSelector *metav1.LabelSelector `json:"clusterSelector,omitempty"` +} + +type GenericPlacementSpec struct { + Placement GenericPlacementFields `json:"placement,omitempty"` +} + +type GenericPlacement struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GenericPlacementSpec `json:"spec,omitempty"` +} + +func UnmarshalGenericPlacement(obj *unstructured.Unstructured) (*GenericPlacement, error) { + placement := &GenericPlacement{} + err := UnstructuredToInterface(obj, placement) + if err != nil { + return nil, err + } + return placement, nil +} + +func (p *GenericPlacement) ClusterNames() []string { + if p.Spec.Placement.Clusters == nil { + return nil + } + clusterNames := []string{} + for _, cluster := range p.Spec.Placement.Clusters { + clusterNames = append(clusterNames, cluster.Name) + } + return clusterNames +} + +func (p *GenericPlacement) ClusterSelector() (labels.Selector, error) { + return metav1.LabelSelectorAsSelector(p.Spec.Placement.ClusterSelector) +} + +func GetClusterNames(obj *unstructured.Unstructured) ([]string, error) { + 
placement, err := UnmarshalGenericPlacement(obj) + if err != nil { + return nil, err + } + return placement.ClusterNames(), nil +} + +func SetClusterNames(obj *unstructured.Unstructured, clusterNames []string) error { + var clusters []interface{} + if clusterNames != nil { + clusters = []interface{}{} + for _, clusterName := range clusterNames { + clusters = append(clusters, map[string]interface{}{ + NameField: clusterName, + }) + } + } + return unstructured.SetNestedSlice(obj.Object, clusters, SpecField, PlacementField, ClustersField) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/propagatedversion.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/propagatedversion.go new file mode 100644 index 000000000..949603e9a --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/propagatedversion.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + fedv1a1 "sigs.k8s.io/kubefed/pkg/apis/core/v1alpha1" +) + +const ( + generationPrefix = "gen:" + resourceVersionPrefix = "rv:" +) + +// ObjectVersion retrieves the field type-prefixed value used for +// determining currency of the given cluster object. 
+func ObjectVersion(clusterObj *unstructured.Unstructured) string { + generation := clusterObj.GetGeneration() + if generation != 0 { + return fmt.Sprintf("%s%d", generationPrefix, generation) + } + return fmt.Sprintf("%s%s", resourceVersionPrefix, clusterObj.GetResourceVersion()) +} + +// ObjectNeedsUpdate determines whether the 2 objects provided cluster +// object needs to be updated according to the desired object and the +// recorded version. +func ObjectNeedsUpdate(desiredObj, clusterObj *unstructured.Unstructured, recordedVersion string) bool { + targetVersion := ObjectVersion(clusterObj) + + if recordedVersion != targetVersion { + return true + } + + // If versions match and the version is sourced from the + // generation field, a further check of metadata equivalency is + // required. + return strings.HasPrefix(targetVersion, generationPrefix) && !ObjectMetaObjEquivalent(desiredObj, clusterObj) +} + +// SortClusterVersions ASCII sorts the given cluster versions slice +// based on cluster name. +func SortClusterVersions(versions []fedv1a1.ClusterObjectVersion) { + sort.Slice(versions, func(i, j int) bool { + return versions[i].ClusterName < versions[j].ClusterName + }) +} + +// PropagatedVersionStatusEquivalent returns true if both statuses are equal by +// comparing Template and Override version, and their ClusterVersion slices; +// false otherwise. +func PropagatedVersionStatusEquivalent(pvs1, pvs2 *fedv1a1.PropagatedVersionStatus) bool { + return pvs1.TemplateVersion == pvs2.TemplateVersion && + pvs1.OverrideVersion == pvs2.OverrideVersion && + reflect.DeepEqual(pvs1.ClusterVersions, pvs2.ClusterVersions) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/qualifiedname.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/qualifiedname.go new file mode 100644 index 000000000..ec56fea6b --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/qualifiedname.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + meta "k8s.io/apimachinery/pkg/api/meta" + pkgruntime "k8s.io/apimachinery/pkg/runtime" +) + +// QualifiedName comprises a resource name with an optional namespace. +// If namespace is provided, a QualifiedName will be rendered as +// "/". If not, it will be rendered as "name". This +// is intended to allow the FederatedTypeAdapter interface and its +// consumers to operate on both namespaces and namespace-qualified +// resources. + +type QualifiedName struct { + Namespace string + Name string +} + +func NewQualifiedName(obj pkgruntime.Object) QualifiedName { + accessor, err := meta.Accessor(obj) + if err != nil { + // TODO(marun) This should never happen, but if it does, the + // resulting empty name. + return QualifiedName{} + } + return QualifiedName{ + Namespace: accessor.GetNamespace(), + Name: accessor.GetName(), + } +} + +// String returns the general purpose string representation +func (n QualifiedName) String() string { + if len(n.Namespace) == 0 { + return n.Name + } + return fmt.Sprintf("%s/%s", n.Namespace, n.Name) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceclient.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceclient.go new file mode 100644 index 000000000..b000d5cc9 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceclient.go @@ -0,0 +1,70 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +type ResourceClient interface { + Resources(namespace string) dynamic.ResourceInterface + Kind() string +} + +type resourceClient struct { + client dynamic.Interface + apiResource schema.GroupVersionResource + namespaced bool + kind string +} + +func NewResourceClient(config *rest.Config, apiResource *metav1.APIResource) (ResourceClient, error) { + resource := schema.GroupVersionResource{ + Group: apiResource.Group, + Version: apiResource.Version, + Resource: apiResource.Name, + } + client, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + return &resourceClient{ + client: client, + apiResource: resource, + namespaced: apiResource.Namespaced, + kind: apiResource.Kind, + }, nil +} + +func (c *resourceClient) Resources(namespace string) dynamic.ResourceInterface { + // TODO(marun) Consider returning Interface instead of + // ResourceInterface to allow callers to decide if they want to + // invoke Namespace(). Either that, or replace the use of + // ResourceClient with the controller-runtime generic client. 
+ if c.namespaced { + return c.client.Resource(c.apiResource).Namespace(namespace) + } + return c.client.Resource(c.apiResource) +} + +func (c *resourceClient) Kind() string { + return c.kind +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceinformer.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceinformer.go new file mode 100644 index 000000000..30b785743 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceinformer.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "github.com/pkg/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +// NewResourceInformer returns an unfiltered informer. +func NewResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) { + return newResourceInformer(client, namespace, apiResource, triggerFunc, "") +} + +// NewManagedResourceInformer returns an informer limited to resources +// managed by KubeFed as indicated by labeling. 
+func NewManagedResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) { + labelSelector := labels.Set(map[string]string{ManagedByKubeFedLabelKey: ManagedByKubeFedLabelValue}).AsSelector().String() + return newResourceInformer(client, namespace, apiResource, triggerFunc, labelSelector) +} + +func newResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object), labelSelector string) (cache.Store, cache.Controller) { + obj := &unstructured.Unstructured{} + + if apiResource != nil { + gvk := schema.GroupVersionKind{Group: apiResource.Group, Version: apiResource.Version, Kind: apiResource.Kind} + obj.SetGroupVersionKind(gvk) + } + return cache.NewInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (pkgruntime.Object, error) { + options.LabelSelector = labelSelector + return client.Resources(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.LabelSelector = labelSelector + return client.Resources(namespace).Watch(options) + }, + }, + obj, // use an unstructured type with apiVersion / kind populated for informer logging purposes + NoResyncPeriod, + NewTriggerOnAllChanges(triggerFunc), + ) +} + +func ObjFromCache(store cache.Store, kind, key string) (*unstructured.Unstructured, error) { + obj, err := rawObjFromCache(store, kind, key) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + return obj.(*unstructured.Unstructured), nil +} + +func rawObjFromCache(store cache.Store, kind, key string) (pkgruntime.Object, error) { + cachedObj, exist, err := store.GetByKey(key) + if err != nil { + wrappedErr := errors.Wrapf(err, "Failed to query %s store for %q", kind, key) + runtime.HandleError(wrappedErr) + return nil, err + } + if !exist { + return nil, nil + } + return 
cachedObj.(pkgruntime.Object).DeepCopyObject(), nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/safe_map.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/safe_map.go new file mode 100644 index 000000000..af3c9064e --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/safe_map.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sync" +) + +type SafeMap struct { + sync.RWMutex + m map[string]interface{} +} + +func NewSafeMap() *SafeMap { + return &SafeMap{ + m: make(map[string]interface{}), + } +} + +func (s *SafeMap) Store(key string, value interface{}) { + s.Lock() + defer s.Unlock() + s.m[key] = value +} + +func (s *SafeMap) Get(key string) (interface{}, bool) { + s.RLock() + defer s.RUnlock() + value, ok := s.m[key] + return value, ok +} + +func (s *SafeMap) GetAll() []interface{} { + s.RLock() + defer s.RUnlock() + vals := []interface{}{} + for _, val := range s.m { + vals = append(vals, val) + } + return vals +} + +func (s *SafeMap) Delete(key string) { + s.Lock() + defer s.Unlock() + delete(s.m, key) +} + +func (s *SafeMap) DeleteAll() { + s.Lock() + defer s.Unlock() + for key := range s.m { + delete(s.m, key) + } +} + +func (s *SafeMap) Size() int { + s.Lock() + defer s.Unlock() + return len(s.m) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/controller/util/worker.go b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/worker.go new file mode 100644 index 
000000000..06244ae7b --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/controller/util/worker.go @@ -0,0 +1,170 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "time" + + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/client-go/util/workqueue" +) + +type ReconcileFunc func(qualifiedName QualifiedName) ReconciliationStatus + +type ReconcileWorker interface { + Enqueue(qualifiedName QualifiedName) + EnqueueForClusterSync(qualifiedName QualifiedName) + EnqueueForError(qualifiedName QualifiedName) + EnqueueForRetry(qualifiedName QualifiedName) + EnqueueObject(obj pkgruntime.Object) + EnqueueWithDelay(qualifiedName QualifiedName, delay time.Duration) + Run(stopChan <-chan struct{}) + SetDelay(retryDelay, clusterSyncDelay time.Duration) +} + +type WorkerTiming struct { + Interval time.Duration + RetryDelay time.Duration + ClusterSyncDelay time.Duration + InitialBackoff time.Duration + MaxBackoff time.Duration +} + +type asyncWorker struct { + reconcile ReconcileFunc + + timing WorkerTiming + + // For triggering reconciliation of a single resource. This is + // used when there is an add/update/delete operation on a resource + // in either the API of the cluster hosting KubeFed or in the API + // of a member cluster. 
+ deliverer *DelayingDeliverer + + // Work queue allowing parallel processing of resources + queue workqueue.Interface + + // Backoff manager + backoff *flowcontrol.Backoff +} + +func NewReconcileWorker(reconcile ReconcileFunc, timing WorkerTiming) ReconcileWorker { + if timing.Interval == 0 { + timing.Interval = time.Second * 1 + } + if timing.RetryDelay == 0 { + timing.RetryDelay = time.Second * 10 + } + if timing.InitialBackoff == 0 { + timing.InitialBackoff = time.Second * 5 + } + if timing.MaxBackoff == 0 { + timing.MaxBackoff = time.Minute + } + return &asyncWorker{ + reconcile: reconcile, + timing: timing, + deliverer: NewDelayingDeliverer(), + queue: workqueue.New(), + backoff: flowcontrol.NewBackOff(timing.InitialBackoff, timing.MaxBackoff), + } +} + +func (w *asyncWorker) Enqueue(qualifiedName QualifiedName) { + w.deliver(qualifiedName, 0, false) +} + +func (w *asyncWorker) EnqueueForError(qualifiedName QualifiedName) { + w.deliver(qualifiedName, 0, true) +} + +func (w *asyncWorker) EnqueueForRetry(qualifiedName QualifiedName) { + w.deliver(qualifiedName, w.timing.RetryDelay, false) +} + +func (w *asyncWorker) EnqueueForClusterSync(qualifiedName QualifiedName) { + w.deliver(qualifiedName, w.timing.ClusterSyncDelay, false) +} + +func (w *asyncWorker) EnqueueObject(obj pkgruntime.Object) { + qualifiedName := NewQualifiedName(obj) + w.Enqueue(qualifiedName) +} + +func (w *asyncWorker) EnqueueWithDelay(qualifiedName QualifiedName, delay time.Duration) { + w.deliver(qualifiedName, delay, false) +} + +func (w *asyncWorker) Run(stopChan <-chan struct{}) { + StartBackoffGC(w.backoff, stopChan) + w.deliverer.StartWithHandler(func(item *DelayingDelivererItem) { + w.queue.Add(item) + }) + go wait.Until(w.worker, w.timing.Interval, stopChan) + + // Ensure all goroutines are cleaned up when the stop channel closes + go func() { + <-stopChan + w.queue.ShutDown() + w.deliverer.Stop() + }() +} + +func (w *asyncWorker) SetDelay(retryDelay, clusterSyncDelay time.Duration) 
{ + w.timing.RetryDelay = retryDelay + w.timing.ClusterSyncDelay = clusterSyncDelay +} + +// deliver adds backoff to delay if this delivery is related to some +// failure. Resets backoff if there was no failure. +func (w *asyncWorker) deliver(qualifiedName QualifiedName, delay time.Duration, failed bool) { + key := qualifiedName.String() + if failed { + w.backoff.Next(key, time.Now()) + delay = delay + w.backoff.Get(key) + } else { + w.backoff.Reset(key) + } + w.deliverer.DeliverAfter(key, &qualifiedName, delay) +} + +func (w *asyncWorker) worker() { + for { + obj, quit := w.queue.Get() + if quit { + return + } + + item := obj.(*DelayingDelivererItem) + qualifiedName := item.Value.(*QualifiedName) + status := w.reconcile(*qualifiedName) + w.queue.Done(item) + + switch status { + case StatusAllOK: + break + case StatusError: + w.EnqueueForError(*qualifiedName) + case StatusNeedsRecheck: + w.EnqueueForRetry(*qualifiedName) + case StatusNotSynced: + w.EnqueueForClusterSync(*qualifiedName) + } + } +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/disable.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/disable.go new file mode 100644 index 000000000..1c23b1877 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/disable.go @@ -0,0 +1,378 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubefedctl + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + apiextv1b1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/klog" + + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/enable" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +const ( + federatedGroupUsage = "The name of the API group to use for deleting the federated CRD type when the federated type config does not exist. Only used with --delete-crd." + targetVersionUsage = "The API version of the target type to use for deletion of the federated CRD type when the federated type config does not exist. Only used with --delete-crd." +) + +var ( + disable_long = ` + Disables propagation of a Kubernetes API type. This command + can also optionally delete the API resources added by the enable + command. + + Current context is assumed to be a Kubernetes cluster hosting + the kubefed control plane. 
Please use the + --host-cluster-context flag otherwise.` + + disable_example = ` + # Disable propagation of the kubernetes API type 'Deployment', named + in FederatedTypeConfig as 'deployments.apps' + kubefedctl disable deployments.apps + + # Disable propagation of the kubernetes API type 'Deployment', named + in FederatedTypeConfig as 'deployments.apps', and delete the + corresponding Federated API resource + kubefedctl disable deployments.apps --delete-crd` +) + +type disableType struct { + options.GlobalSubcommandOptions + options.CommonEnableOptions + disableTypeOptions +} + +type disableTypeOptions struct { + deleteCRD bool + enableTypeDirective *enable.EnableTypeDirective +} + +// Bind adds the disable specific arguments to the flagset passed in as an +// argument. +func (o *disableTypeOptions) Bind(flags *pflag.FlagSet) { + flags.BoolVar(&o.deleteCRD, "delete-crd", false, "Whether to remove the API resource added by 'enable'.") +} + +// NewCmdTypeDisable defines the `disable` command that +// disables federation of a Kubernetes API type. +func NewCmdTypeDisable(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &disableType{} + + cmd := &cobra.Command{ + Use: "disable NAME", + Short: "Disables propagation of a Kubernetes API type", + Long: disable_long, + Example: disable_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.Run(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + opts.CommonSubcommandBind(flags, federatedGroupUsage, targetVersionUsage) + opts.Bind(flags) + + return cmd +} + +// Complete ensures that options are valid and marshals them if necessary. 
+func (j *disableType) Complete(args []string) error { + j.enableTypeDirective = enable.NewEnableTypeDirective() + directive := j.enableTypeDirective + + if err := j.SetName(args); err != nil { + return err + } + + if !j.deleteCRD { + if len(j.TargetVersion) > 0 { + return errors.New("--version flag valid only with --delete-crd") + } else if j.FederatedGroup != options.DefaultFederatedGroup { + return errors.New("--kubefed-group flag valid only with --delete-crd") + } + } + + if len(j.TargetVersion) > 0 { + directive.Spec.TargetVersion = j.TargetVersion + } + if len(j.FederatedGroup) > 0 { + directive.Spec.FederatedGroup = j.FederatedGroup + } + + return nil +} + +// Run is the implementation of the `disable` command. +func (j *disableType) Run(cmdOut io.Writer, config util.FedConfig) error { + hostConfig, err := config.HostConfig(j.HostClusterContext, j.Kubeconfig) + if err != nil { + return errors.Wrap(err, "Failed to get host cluster config") + } + + // If . is specified, the target name is assumed as a group qualified name. + // In such case, ignore the lookup to make sure deletion of a federatedtypeconfig + // for which the corresponding target has been removed. 
+ name := j.TargetName + if !strings.Contains(j.TargetName, ".") { + apiResource, err := enable.LookupAPIResource(hostConfig, j.TargetName, "") + if err != nil { + return err + } + name = typeconfig.GroupQualifiedName(*apiResource) + } + + typeConfigName := ctlutil.QualifiedName{ + Namespace: j.KubeFedNamespace, + Name: name, + } + j.enableTypeDirective.Name = typeConfigName.Name + return DisableFederation(cmdOut, hostConfig, j.enableTypeDirective, typeConfigName, j.deleteCRD, j.DryRun, true) +} + +func DisableFederation(cmdOut io.Writer, config *rest.Config, enableTypeDirective *enable.EnableTypeDirective, + typeConfigName ctlutil.QualifiedName, deleteCRD, dryRun, verifyStopped bool) error { + client, err := genericclient.New(config) + if err != nil { + return errors.Wrap(err, "Failed to get kubefed clientset") + } + + write := func(data string) { + if cmdOut == nil { + return + } + + if _, err := cmdOut.Write([]byte(data)); err != nil { + klog.Fatalf("Unexpected err: %v\n", err) + } + } + + typeConfig := &fedv1b1.FederatedTypeConfig{} + ftcExists, err := checkFederatedTypeConfigExists(client, typeConfig, typeConfigName, write) + if err != nil { + return err + } + + if dryRun { + return nil + } + + // Disable propagation and verify it is stopped before deleting the CRD + // when no custom resources exist. This avoids spurious error messages in + // the controller manager log as watches are terminated and cannot be + // reestablished. 
+ if ftcExists { + if deleteCRD { + err = checkFederatedTypeCustomResourcesExist(config, typeConfig, write) + if err != nil { + return err + } + } + if typeConfig.GetPropagationEnabled() { + err = disablePropagation(client, typeConfig, typeConfigName, write) + if err != nil { + return err + } + } + if verifyStopped { + err = verifyPropagationControllerStopped(client, typeConfigName, write) + if err != nil { + return err + } + } + } + + if deleteCRD { + if !ftcExists { + typeConfig, err = generatedFederatedTypeConfig(config, enableTypeDirective) + if err != nil { + return err + } + } + err = deleteFederatedType(config, typeConfig, write) + if err != nil { + return err + } + } + + if ftcExists { + err = deleteFederatedTypeConfig(client, typeConfig, typeConfigName, write) + if err != nil { + return err + } + } + + return nil +} + +func checkFederatedTypeConfigExists(client genericclient.Client, typeConfig *fedv1b1.FederatedTypeConfig, typeConfigName ctlutil.QualifiedName, write func(string)) (bool, error) { + err := client.Get(context.TODO(), typeConfig, typeConfigName.Namespace, typeConfigName.Name) + if err == nil { + return true, nil + } + + if apierrors.IsNotFound(err) { + write(fmt.Sprintf("FederatedTypeConfig %q does not exist\n", typeConfigName)) + return false, nil + } + + return false, errors.Wrapf(err, "Error retrieving FederatedTypeConfig %q", typeConfigName) +} + +func disablePropagation(client genericclient.Client, typeConfig *fedv1b1.FederatedTypeConfig, typeConfigName ctlutil.QualifiedName, write func(string)) error { + if typeConfig.GetPropagationEnabled() { + typeConfig.Spec.Propagation = fedv1b1.PropagationDisabled + err := client.Update(context.TODO(), typeConfig) + if err != nil { + return errors.Wrapf(err, "Error disabling propagation for FederatedTypeConfig %q", typeConfigName) + } + write(fmt.Sprintf("Disabled propagation for FederatedTypeConfig %q\n", typeConfigName)) + } else { + write(fmt.Sprintf("Propagation already disabled for 
FederatedTypeConfig %q\n", typeConfigName)) + } + return nil +} + +func verifyPropagationControllerStopped(client genericclient.Client, typeConfigName ctlutil.QualifiedName, write func(string)) error { + write(fmt.Sprintf("Verifying propagation controller is stopped for FederatedTypeConfig %q\n", typeConfigName)) + + var typeConfig *fedv1b1.FederatedTypeConfig + err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { + typeConfig = &fedv1b1.FederatedTypeConfig{} + err := client.Get(context.TODO(), typeConfig, typeConfigName.Namespace, typeConfigName.Name) + if err != nil { + klog.Errorf("Error retrieving FederatedTypeConfig %q: %v", typeConfigName, err) + return false, nil + } + if typeConfig.Status.PropagationController == fedv1b1.ControllerStatusNotRunning { + return true, nil + } + return false, nil + }) + + if err != nil { + return errors.Wrapf(err, "Unable to verify propagation controller for FederatedTypeConfig %q is stopped: %v", typeConfigName, err) + } + + write(fmt.Sprintf("Propagation controller for FederatedTypeConfig %q is stopped\n", typeConfigName)) + return nil +} + +func deleteFederatedTypeConfig(client genericclient.Client, typeConfig *fedv1b1.FederatedTypeConfig, typeConfigName ctlutil.QualifiedName, write func(string)) error { + err := client.Delete(context.TODO(), typeConfig, typeConfig.Namespace, typeConfig.Name) + if err != nil { + return errors.Wrapf(err, "Error deleting FederatedTypeConfig %q", typeConfigName) + } + write(fmt.Sprintf("federatedtypeconfig %q deleted\n", typeConfigName)) + return nil +} + +func generatedFederatedTypeConfig(config *rest.Config, enableTypeDirective *enable.EnableTypeDirective) (*fedv1b1.FederatedTypeConfig, error) { + apiResource, err := enable.LookupAPIResource(config, enableTypeDirective.Name, enableTypeDirective.Spec.TargetVersion) + if err != nil { + return nil, err + } + typeConfig := enable.GenerateTypeConfigForTarget(*apiResource, 
enableTypeDirective).(*fedv1b1.FederatedTypeConfig) + return typeConfig, nil +} + +func deleteFederatedType(config *rest.Config, typeConfig typeconfig.Interface, write func(string)) error { + err := checkFederatedTypeCustomResourcesExist(config, typeConfig, write) + if err != nil { + return err + } + + crdName := typeconfig.GroupQualifiedName(typeConfig.GetFederatedType()) + err = deleteFederatedCRD(config, crdName, write) + if err != nil { + return err + } + + return nil +} + +func checkFederatedTypeCustomResourcesExist(config *rest.Config, typeConfig typeconfig.Interface, write func(string)) error { + federatedTypeAPIResource := typeConfig.GetFederatedType() + crdName := typeconfig.GroupQualifiedName(federatedTypeAPIResource) + exists, err := customResourcesExist(config, &federatedTypeAPIResource) + if err != nil { + return err + } else if exists { + return errors.Errorf("Cannot delete CRD %q while resource instances exist. Please try kubefedctl disable again after removing the resource instances or without the '--delete-crd' option\n", crdName) + } + return nil +} + +func customResourcesExist(config *rest.Config, resource *metav1.APIResource) (bool, error) { + client, err := ctlutil.NewResourceClient(config, resource) + if err != nil { + return false, err + } + + options := metav1.ListOptions{} + objList, err := client.Resources("").List(options) + if apierrors.IsNotFound(err) { + return false, nil + } else if err != nil { + return false, err + } + return len(objList.Items) != 0, nil +} + +func deleteFederatedCRD(config *rest.Config, crdName string, write func(string)) error { + client, err := apiextv1b1client.NewForConfig(config) + if err != nil { + return errors.Wrap(err, "Error creating crd client") + } + + err = client.CustomResourceDefinitions().Delete(crdName, nil) + if apierrors.IsNotFound(err) { + write(fmt.Sprintf("customresourcedefinition %q does not exist\n", crdName)) + } else if err != nil { + return errors.Wrapf(err, "Error deleting crd %q", 
crdName) + } else { + write(fmt.Sprintf("customresourcedefinition %q deleted\n", crdName)) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/deprecatedapis.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/deprecatedapis.go new file mode 100644 index 000000000..22d77ea1f --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/deprecatedapis.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enable + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" +) + +// Deprecated APIs removed in 1.16 will be served by current equivalent APIs +// https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/ +// +// Only allow one of the equivalent APIs for federation to avoid the possibility +// of multiple sync controllers fighting to update the same resource +var equivalentAPIs = map[string][]schema.GroupVersion{ + "deployments": { + { + Group: "apps", + Version: "v1", + }, + { + Group: "apps", + Version: "v1beta1", + }, + { + Group: "apps", + Version: "v1beta2", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, + "daemonsets": { + { + Group: "apps", + Version: "v1", + }, + { + Group: "apps", + Version: "v1beta1", + }, + { + Group: "apps", + Version: "v1beta2", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, + "statefulsets": { + { + Group: "apps", + Version: "v1", + }, + { + Group: 
"apps", + Version: "v1beta1", + }, + { + Group: "apps", + Version: "v1beta2", + }, + }, + "replicasets": { + { + Group: "apps", + Version: "v1", + }, + { + Group: "apps", + Version: "v1beta1", + }, + { + Group: "apps", + Version: "v1beta2", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, + "networkpolicies": { + { + Group: "networking.k8s.io", + Version: "v1", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, + "podsecuritypolicies": { + { + Group: "policy", + Version: "v1beta1", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, + "ingresses": { + { + Group: "networking.k8s.io", + Version: "v1beta1", + }, + { + Group: "extensions", + Version: "v1beta1", + }, + }, +} + +func IsEquivalentAPI(existingAPI, newAPI *fedv1b1.APIResource) bool { + if existingAPI.PluralName != newAPI.PluralName { + return false + } + + apis, ok := equivalentAPIs[existingAPI.PluralName] + if !ok { + return false + } + + for _, gv := range apis { + if gv.Group == existingAPI.Group && gv.Version == existingAPI.Version { + // skip exactly matched API from equivalent API list + continue + } + + if gv.Group == newAPI.Group && gv.Version == newAPI.Version { + return true + } + } + + return false +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/directive.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/directive.go new file mode 100644 index 000000000..48919900b --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/directive.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enable + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" +) + +// EnableTypeDirectiveSpec defines the desired state of EnableTypeDirective. +type EnableTypeDirectiveSpec struct { + // The API version of the target type. + // +optional + TargetVersion string `json:"targetVersion,omitempty"` + + // The name of the API group to use for generated federated types. + // +optional + FederatedGroup string `json:"federatedGroup,omitempty"` + + // The API version to use for generated federated types. + // +optional + FederatedVersion string `json:"federatedVersion,omitempty"` +} + +// TODO(marun) This should become a proper API type and drive enabling +// type federation via a controller. For now its only purpose is to +// enable loading of configuration from disk. +type EnableTypeDirective struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EnableTypeDirectiveSpec `json:"spec,omitempty"` +} + +func (ft *EnableTypeDirective) SetDefaults() { + ft.Spec.FederatedGroup = options.DefaultFederatedGroup + ft.Spec.FederatedVersion = options.DefaultFederatedVersion +} + +func NewEnableTypeDirective() *EnableTypeDirective { + ft := &EnableTypeDirective{} + ft.SetDefaults() + return ft +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/enable.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/enable.go new file mode 100644 index 000000000..ea5c8b114 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/enable.go @@ -0,0 +1,428 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enable + +import ( + "context" + "fmt" + "io" + + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextv1b1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + pkgruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/klog" + + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +const ( + federatedGroupUsage = "The name of the API group to use for the generated federated type." + targetVersionUsage = "Optional, the API version of the target type." +) + +var ( + enable_long = ` + Enables a Kubernetes API type (including a CRD) to be propagated + to clusters registered with a KubeFed control plane. A CRD for + the federated type will be generated and a FederatedTypeConfig will + be created to configure a sync controller. + + Current context is assumed to be a Kubernetes cluster hosting + the kubefed control plane. 
Please use the + --host-cluster-context flag otherwise.` + + enable_example = ` + # Enable federation of Deployments + kubefedctl enable deployments.apps --host-cluster-context=cluster1 + + # Enable federation of Deployments identified by name specified in + # deployment.yaml + kubefedctl enable -f deployment.yaml` +) + +type enableType struct { + options.GlobalSubcommandOptions + options.CommonEnableOptions + enableTypeOptions +} + +type enableTypeOptions struct { + federatedVersion string + output string + outputYAML bool + filename string + enableTypeDirective *EnableTypeDirective +} + +// Bind adds the join specific arguments to the flagset passed in as an +// argument. +func (o *enableTypeOptions) Bind(flags *pflag.FlagSet) { + flags.StringVar(&o.federatedVersion, "federated-version", options.DefaultFederatedVersion, "The API version to use for the generated federated type.") + flags.StringVarP(&o.output, "output", "o", "", "If provided, the resources that would be created in the API by the command are instead output to stdout in the provided format. Valid values are ['yaml'].") + flags.StringVarP(&o.filename, "filename", "f", "", "If provided, the command will be configured from the provided yaml file. Only --output will be accepted from the command line") +} + +// NewCmdTypeEnable defines the `enable` command that +// enables federation of a Kubernetes API type. 
+func NewCmdTypeEnable(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &enableType{} + + cmd := &cobra.Command{ + Use: "enable (NAME | -f FILENAME)", + Short: "Enables propagation of a Kubernetes API type", + Long: enable_long, + Example: enable_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.Run(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + opts.CommonSubcommandBind(flags, federatedGroupUsage, targetVersionUsage) + opts.Bind(flags) + + return cmd +} + +// Complete ensures that options are valid and marshals them if necessary. +func (j *enableType) Complete(args []string) error { + j.enableTypeDirective = NewEnableTypeDirective() + fd := j.enableTypeDirective + + if j.output == "yaml" { + j.outputYAML = true + } else if len(j.output) > 0 { + return errors.Errorf("Invalid value for --output: %s", j.output) + } + + if len(j.filename) > 0 { + err := DecodeYAMLFromFile(j.filename, fd) + if err != nil { + return errors.Wrapf(err, "Failed to load yaml from file %q", j.filename) + } + return nil + } + + if err := j.SetName(args); err != nil { + return err + } + + fd.Name = j.TargetName + + if len(j.TargetVersion) > 0 { + fd.Spec.TargetVersion = j.TargetVersion + } + if len(j.FederatedGroup) > 0 { + fd.Spec.FederatedGroup = j.FederatedGroup + } + if len(j.federatedVersion) > 0 { + fd.Spec.FederatedVersion = j.federatedVersion + } + + return nil +} + +// Run is the implementation of the `enable` command. 
+func (j *enableType) Run(cmdOut io.Writer, config util.FedConfig) error { + hostConfig, err := config.HostConfig(j.HostClusterContext, j.Kubeconfig) + if err != nil { + return errors.Wrap(err, "Failed to get host cluster config") + } + + resources, err := GetResources(hostConfig, j.enableTypeDirective) + if err != nil { + return err + } + + if j.outputYAML { + concreteTypeConfig := resources.TypeConfig.(*fedv1b1.FederatedTypeConfig) + objects := []pkgruntime.Object{concreteTypeConfig, resources.CRD} + err := writeObjectsToYAML(objects, cmdOut) + if err != nil { + return errors.Wrap(err, "Failed to write objects to YAML") + } + // -o yaml implies dry run + return nil + } + + return CreateResources(cmdOut, hostConfig, resources, j.KubeFedNamespace, j.DryRun) +} + +type typeResources struct { + TypeConfig typeconfig.Interface + CRD *apiextv1b1.CustomResourceDefinition +} + +func GetResources(config *rest.Config, enableTypeDirective *EnableTypeDirective) (*typeResources, error) { + apiResource, err := LookupAPIResource(config, enableTypeDirective.Name, enableTypeDirective.Spec.TargetVersion) + if err != nil { + return nil, err + } + klog.V(2).Infof("Found type %q", resourceKey(*apiResource)) + + typeConfig := GenerateTypeConfigForTarget(*apiResource, enableTypeDirective) + + accessor, err := newSchemaAccessor(config, *apiResource) + if err != nil { + return nil, errors.Wrap(err, "Error initializing validation schema accessor") + } + + shortNames := []string{} + for _, shortName := range apiResource.ShortNames { + shortNames = append(shortNames, fmt.Sprintf("f%s", shortName)) + } + + crd := federatedTypeCRD(typeConfig, accessor, shortNames) + + return &typeResources{ + TypeConfig: typeConfig, + CRD: crd, + }, nil +} + +// TODO(marun) Allow updates to the configuration for a type that has +// already been enabled for kubefed. This would likely involve +// updating the version of the target type and the validation of the schema. 
+func CreateResources(cmdOut io.Writer, config *rest.Config, resources *typeResources, namespace string, dryRun bool) error { + write := func(data string) { + if cmdOut != nil { + if _, err := cmdOut.Write([]byte(data)); err != nil { + klog.Fatalf("Unexpected err: %v\n", err) + } + } + } + + hostClientset, err := util.HostClientset(config) + if err != nil { + return errors.Wrap(err, "Failed to create host clientset") + } + _, err = hostClientset.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return errors.Wrapf(err, "KubeFed system namespace %q does not exist", namespace) + } else if err != nil { + return errors.Wrapf(err, "Error attempting to determine whether KubeFed system namespace %q exists", namespace) + } + + client, err := genericclient.New(config) + if err != nil { + return errors.Wrap(err, "Failed to get kubefed clientset") + } + + concreteTypeConfig := resources.TypeConfig.(*fedv1b1.FederatedTypeConfig) + existingTypeConfig := &fedv1b1.FederatedTypeConfig{} + err = client.Get(context.TODO(), existingTypeConfig, namespace, concreteTypeConfig.Name) + if err != nil && !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "Error retrieving FederatedTypeConfig %q", concreteTypeConfig.Name) + } + if err == nil { + fedType := existingTypeConfig.GetFederatedType() + target := existingTypeConfig.GetTargetType() + concreteType := concreteTypeConfig.GetFederatedType() + if fedType.Name != concreteType.Name || fedType.Version != concreteType.Version || fedType.Group != concreteType.Group { + return errors.Errorf("Federation is already enabled for %q with federated type %q. 
Changing the federated type to %q is not supported.", + qualifiedAPIResourceName(target), + qualifiedAPIResourceName(fedType), + qualifiedAPIResourceName(concreteType)) + } + } + + crdClient, err := apiextv1b1client.NewForConfig(config) + if err != nil { + return errors.Wrap(err, "Failed to create crd clientset") + } + + existingCRD, err := crdClient.CustomResourceDefinitions().Get(resources.CRD.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + if !dryRun { + _, err = crdClient.CustomResourceDefinitions().Create(resources.CRD) + if err != nil { + return errors.Wrapf(err, "Error creating CRD %q", resources.CRD.Name) + } + } + write(fmt.Sprintf("customresourcedefinition.apiextensions.k8s.io/%s created\n", resources.CRD.Name)) + } else if err != nil { + return errors.Wrapf(err, "Error getting CRD %q", resources.CRD.Name) + } else { + ftcs := &fedv1b1.FederatedTypeConfigList{} + err := client.List(context.TODO(), ftcs, namespace) + if err != nil { + return errors.Wrap(err, "Error getting FederatedTypeConfig list") + } + + for _, ftc := range ftcs.Items { + targetAPI := concreteTypeConfig.Spec.TargetType + existingAPI := ftc.Spec.TargetType + if IsEquivalentAPI(&existingAPI, &targetAPI) { + existingName := qualifiedAPIResourceName(ftc.GetTargetType()) + name := qualifiedAPIResourceName(concreteTypeConfig.GetTargetType()) + qualifiedFTCName := ctlutil.QualifiedName{ + Namespace: ftc.Namespace, + Name: ftc.Name, + } + + return errors.Errorf("Failed to enable %q. 
Federation of this type is already enabled for equivalent type %q by FederatedTypeConfig %q", + name, existingName, qualifiedFTCName) + } + + if concreteTypeConfig.Name == ftc.Name { + continue + } + + fedType := ftc.Spec.FederatedType + name := typeconfig.GroupQualifiedName(metav1.APIResource{Name: fedType.PluralName, Group: fedType.Group}) + if name == existingCRD.Name { + return errors.Errorf("Failed to enable federation of %q due to the FederatedTypeConfig for %q already referencing a federated type CRD named %q. If these target types are distinct despite sharing the same kind, specifying a non-default --federated-group should allow %q to be enabled.", + concreteTypeConfig.Name, ftc.Name, name, concreteTypeConfig.Name) + } + } + + existingCRD.Spec = resources.CRD.Spec + if !dryRun { + _, err = crdClient.CustomResourceDefinitions().Update(existingCRD) + if err != nil { + return errors.Wrapf(err, "Error updating CRD %q", resources.CRD.Name) + } + } + write(fmt.Sprintf("customresourcedefinition.apiextensions.k8s.io/%s updated\n", resources.CRD.Name)) + } + + concreteTypeConfig.Namespace = namespace + err = client.Get(context.TODO(), existingTypeConfig, namespace, concreteTypeConfig.Name) + createdOrUpdated := "created" + if err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "Error retrieving FederatedTypeConfig %q", concreteTypeConfig.Name) + } + if !dryRun { + err = client.Create(context.TODO(), concreteTypeConfig) + if err != nil { + return errors.Wrapf(err, "Error creating FederatedTypeConfig %q", concreteTypeConfig.Name) + } + } + } else { + existingTypeConfig.Spec = concreteTypeConfig.Spec + if !dryRun { + err = client.Update(context.TODO(), existingTypeConfig) + if err != nil { + return errors.Wrapf(err, "Error updating FederatedTypeConfig %q", concreteTypeConfig.Name) + } + } + createdOrUpdated = "updated" + } + write(fmt.Sprintf("federatedtypeconfig.core.kubefed.io/%s %s in namespace %s\n", + concreteTypeConfig.Name, 
createdOrUpdated, namespace)) + return nil +} + +func GenerateTypeConfigForTarget(apiResource metav1.APIResource, enableTypeDirective *EnableTypeDirective) typeconfig.Interface { + spec := enableTypeDirective.Spec + kind := apiResource.Kind + pluralName := apiResource.Name + typeConfig := &fedv1b1.FederatedTypeConfig{ + // Explicitly including TypeMeta will ensure it will be + // serialized properly to yaml. + TypeMeta: metav1.TypeMeta{ + Kind: "FederatedTypeConfig", + APIVersion: "core.kubefed.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: typeconfig.GroupQualifiedName(apiResource), + }, + Spec: fedv1b1.FederatedTypeConfigSpec{ + TargetType: fedv1b1.APIResource{ + Version: apiResource.Version, + Kind: kind, + Scope: NamespacedToScope(apiResource), + }, + Propagation: fedv1b1.PropagationEnabled, + FederatedType: fedv1b1.APIResource{ + Group: spec.FederatedGroup, + Version: spec.FederatedVersion, + Kind: fmt.Sprintf("Federated%s", kind), + PluralName: fmt.Sprintf("federated%s", pluralName), + Scope: FederatedNamespacedToScope(apiResource), + }, + }, + } + + // Set defaults that would normally be set by the api + fedv1b1.SetFederatedTypeConfigDefaults(typeConfig) + return typeConfig +} + +func qualifiedAPIResourceName(resource metav1.APIResource) string { + if resource.Group == "" { + return fmt.Sprintf("%s/%s", resource.Name, resource.Version) + } + return fmt.Sprintf("%s.%s/%s", resource.Name, resource.Group, resource.Version) +} + +func federatedTypeCRD(typeConfig typeconfig.Interface, accessor schemaAccessor, shortNames []string) *apiextv1b1.CustomResourceDefinition { + templateSchema := accessor.templateSchema() + schema := federatedTypeValidationSchema(templateSchema) + return CrdForAPIResource(typeConfig.GetFederatedType(), schema, shortNames) +} + +func writeObjectsToYAML(objects []pkgruntime.Object, w io.Writer) error { + for _, obj := range objects { + if _, err := w.Write([]byte("---\n")); err != nil { + return errors.Wrap(err, "Error encoding 
object to yaml") + } + + if err := writeObjectToYAML(obj, w); err != nil { + return errors.Wrap(err, "Error encoding object to yaml") + } + } + return nil +} + +func writeObjectToYAML(obj pkgruntime.Object, w io.Writer) error { + json, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(obj) + if err != nil { + return err + } + + unstructuredObj := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode(json, nil, unstructuredObj); err != nil { + return err + } + + return util.WriteUnstructuredToYaml(unstructuredObj, w) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/schema.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/schema.go new file mode 100644 index 000000000..ec16c208c --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/schema.go @@ -0,0 +1,221 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enable + +import ( + "fmt" + + "github.com/pkg/errors" + + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextv1b1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kubectl/pkg/util/openapi" +) + +type schemaAccessor interface { + templateSchema() map[string]apiextv1b1.JSONSchemaProps +} + +func newSchemaAccessor(config *rest.Config, apiResource metav1.APIResource) (schemaAccessor, error) { + // Assume the resource may be a CRD, and fall back to OpenAPI if that is not the case. + crdAccessor, err := newCRDSchemaAccessor(config, apiResource) + if err != nil { + return nil, err + } + if crdAccessor != nil { + return crdAccessor, nil + } + return newOpenAPISchemaAccessor(config, apiResource) +} + +type crdSchemaAccessor struct { + validation *apiextv1b1.CustomResourceValidation +} + +func newCRDSchemaAccessor(config *rest.Config, apiResource metav1.APIResource) (schemaAccessor, error) { + // CRDs must have a group + if len(apiResource.Group) == 0 { + return nil, nil + } + // Check whether the target resource is a crd + crdClient, err := apiextv1b1client.NewForConfig(config) + if err != nil { + return nil, errors.Wrap(err, "Failed to create crd clientset") + } + crdName := fmt.Sprintf("%s.%s", apiResource.Name, apiResource.Group) + crd, err := crdClient.CustomResourceDefinitions().Get(crdName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return nil, nil + } + if err != nil { + return nil, errors.Wrapf(err, "Error attempting retrieval of crd %q", crdName) + } + return &crdSchemaAccessor{validation: crd.Spec.Validation}, nil +} + +func (a *crdSchemaAccessor) templateSchema() map[string]apiextv1b1.JSONSchemaProps { + if 
a.validation != nil && a.validation.OpenAPIV3Schema != nil { + return a.validation.OpenAPIV3Schema.Properties + } + return nil +} + +type openAPISchemaAccessor struct { + targetResource proto.Schema +} + +func newOpenAPISchemaAccessor(config *rest.Config, apiResource metav1.APIResource) (schemaAccessor, error) { + client, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, errors.Wrap(err, "Error creating discovery client") + } + resources, err := openapi.NewOpenAPIGetter(client).Get() + if err != nil { + return nil, errors.Wrap(err, "Error loading openapi schema") + } + gvk := schema.GroupVersionKind{ + Group: apiResource.Group, + Version: apiResource.Version, + Kind: apiResource.Kind, + } + targetResource := resources.LookupResource(gvk) + if targetResource == nil { + return nil, errors.Errorf("Unable to find openapi schema for %q", gvk) + } + return &openAPISchemaAccessor{ + targetResource: targetResource, + }, nil +} + +func (a *openAPISchemaAccessor) templateSchema() map[string]apiextv1b1.JSONSchemaProps { + var templateSchema *apiextv1b1.JSONSchemaProps + visitor := &jsonSchemaVistor{ + collect: func(schema apiextv1b1.JSONSchemaProps) { + templateSchema = &schema + }, + } + a.targetResource.Accept(visitor) + + return templateSchema.Properties +} + +// jsonSchemaVistor converts proto.Schema resources into json schema. +// A local visitor (and associated callback) is intended to be created +// whenever a function needs to recurse. 
+// +// TODO(marun) Generate more extensive schema if/when openapi schema +// provides more detail as per https://github.com/ant31/crd-validation +type jsonSchemaVistor struct { + collect func(schema apiextv1b1.JSONSchemaProps) +} + +func (v *jsonSchemaVistor) VisitArray(a *proto.Array) { + arraySchema := apiextv1b1.JSONSchemaProps{ + Type: "array", + Items: &apiextv1b1.JSONSchemaPropsOrArray{}, + } + localVisitor := &jsonSchemaVistor{ + collect: func(schema apiextv1b1.JSONSchemaProps) { + arraySchema.Items.Schema = &schema + }, + } + a.SubType.Accept(localVisitor) + v.collect(arraySchema) +} + +func (v *jsonSchemaVistor) VisitMap(m *proto.Map) { + mapSchema := apiextv1b1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiextv1b1.JSONSchemaPropsOrBool{ + Allows: true, + }, + } + localVisitor := &jsonSchemaVistor{ + collect: func(schema apiextv1b1.JSONSchemaProps) { + mapSchema.AdditionalProperties.Schema = &schema + }, + } + m.SubType.Accept(localVisitor) + v.collect(mapSchema) +} + +func (v *jsonSchemaVistor) VisitPrimitive(p *proto.Primitive) { + schema := schemaForPrimitive(p) + v.collect(schema) +} + +func (v *jsonSchemaVistor) VisitKind(k *proto.Kind) { + kindSchema := apiextv1b1.JSONSchemaProps{ + Type: "object", + Properties: make(map[string]apiextv1b1.JSONSchemaProps), + Required: k.RequiredFields, + } + for key, fieldSchema := range k.Fields { + // Status cannot be defined for a template + if key == "status" { + continue + } + localVisitor := &jsonSchemaVistor{ + collect: func(schema apiextv1b1.JSONSchemaProps) { + kindSchema.Properties[key] = schema + }, + } + fieldSchema.Accept(localVisitor) + } + v.collect(kindSchema) +} + +func (v *jsonSchemaVistor) VisitReference(r proto.Reference) { + // Short-circuit the recursive definition of JSONSchemaProps (used for CRD validation) + // + // TODO(marun) Implement proper support for recursive schema + if r.Reference() == "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps" { 
+ v.collect(apiextv1b1.JSONSchemaProps{Type: "object"}) + return + } + + r.SubSchema().Accept(v) +} + +func schemaForPrimitive(p *proto.Primitive) apiextv1b1.JSONSchemaProps { + schema := apiextv1b1.JSONSchemaProps{} + + if p.Format == "int-or-string" { + schema.AnyOf = []apiextv1b1.JSONSchemaProps{ + { + Type: "integer", + Format: "int32", + }, + { + Type: "string", + }, + } + return schema + } + + if len(p.Format) > 0 { + schema.Format = p.Format + } + schema.Type = p.Type + return schema +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/util.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/util.go new file mode 100644 index 000000000..44f596a44 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/util.go @@ -0,0 +1,199 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enable + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/pkg/errors" + + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + "sigs.k8s.io/kubefed/pkg/apis/core/common" + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" +) + +func DecodeYAMLFromFile(filename string, obj interface{}) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return DecodeYAML(f, obj) +} + +func DecodeYAML(r io.Reader, obj interface{}) error { + decoder := yaml.NewYAMLToJSONDecoder(r) + return decoder.Decode(obj) +} + +func CrdForAPIResource(apiResource metav1.APIResource, validation *apiextv1b1.CustomResourceValidation, shortNames []string) *apiextv1b1.CustomResourceDefinition { + scope := apiextv1b1.ClusterScoped + if apiResource.Namespaced { + scope = apiextv1b1.NamespaceScoped + } + return &apiextv1b1.CustomResourceDefinition{ + // Explicitly including TypeMeta will ensure it will be + // serialized properly to yaml. 
+ TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: "apiextensions.k8s.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: typeconfig.GroupQualifiedName(apiResource), + }, + Spec: apiextv1b1.CustomResourceDefinitionSpec{ + Group: apiResource.Group, + Version: apiResource.Version, + Scope: scope, + Names: apiextv1b1.CustomResourceDefinitionNames{ + Plural: apiResource.Name, + Kind: apiResource.Kind, + ShortNames: shortNames, + }, + Validation: validation, + Subresources: &apiextv1b1.CustomResourceSubresources{ + Status: &apiextv1b1.CustomResourceSubresourceStatus{}, + }, + }, + } +} + +func LookupAPIResource(config *rest.Config, key, targetVersion string) (*metav1.APIResource, error) { + resourceLists, err := GetServerPreferredResources(config) + if err != nil { + return nil, err + } + + var targetResource *metav1.APIResource + var matchedResources []string + for _, resourceList := range resourceLists { + // The list holds the GroupVersion for its list of APIResources + gv, err := schema.ParseGroupVersion(resourceList.GroupVersion) + if err != nil { + return nil, errors.Wrap(err, "Error parsing GroupVersion") + } + if len(targetVersion) > 0 && gv.Version != targetVersion { + continue + } + for _, resource := range resourceList.APIResources { + group := gv.Group + if NameMatchesResource(key, resource, group) { + if targetResource == nil { + targetResource = resource.DeepCopy() + targetResource.Group = group + targetResource.Version = gv.Version + } + matchedResources = append(matchedResources, groupQualifiedName(resource.Name, gv.Group)) + } + } + + } + if len(matchedResources) > 1 { + return nil, errors.Errorf("Multiple resources are matched by %q: %s. 
A group-qualified plural name must be provided.", key, strings.Join(matchedResources, ", ")) + } + + if targetResource != nil { + return targetResource, nil + } + + return nil, errors.Errorf("Unable to find api resource named %q.", key) +} + +func NameMatchesResource(name string, apiResource metav1.APIResource, group string) bool { + lowerCaseName := strings.ToLower(name) + if lowerCaseName == apiResource.Name || + lowerCaseName == apiResource.SingularName || + lowerCaseName == strings.ToLower(apiResource.Kind) || + lowerCaseName == fmt.Sprintf("%s.%s", apiResource.Name, group) { + return true + } + for _, shortName := range apiResource.ShortNames { + if lowerCaseName == strings.ToLower(shortName) { + return true + } + } + + return false +} + +func GetServerPreferredResources(config *rest.Config) ([]*metav1.APIResourceList, error) { + // TODO(marun) Consider using a caching scheme ala kubectl + client, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, errors.Wrap(err, "Error creating discovery client") + } + + resourceLists, err := client.ServerPreferredResources() + if err != nil { + return nil, errors.Wrap(err, "Error listing api resources") + } + return resourceLists, nil +} + +func NamespacedToScope(apiResource metav1.APIResource) apiextv1b1.ResourceScope { + if apiResource.Namespaced { + return apiextv1b1.NamespaceScoped + } + return apiextv1b1.ClusterScoped +} + +func FederatedNamespacedToScope(apiResource metav1.APIResource) apiextv1b1.ResourceScope { + // Special-case the scope of federated namespace since it will + // hopefully be the only instance of the scope of a federated + // type differing from the scope of its target. + if typeconfig.GroupQualifiedName(apiResource) == common.NamespaceName { + // FederatedNamespace is namespaced to allow the control plane to run + // with only namespace-scoped permissions e.g. to determine placement. 
+ return apiextv1b1.NamespaceScoped + } + return NamespacedToScope(apiResource) +} + +func resourceKey(apiResource metav1.APIResource) string { + var group string + if len(apiResource.Group) == 0 { + group = "core" + } else { + group = apiResource.Group + } + var version string + if len(apiResource.Version) == 0 { + version = "v1" + } else { + version = apiResource.Version + } + return fmt.Sprintf("%s.%s/%s", apiResource.Name, group, version) +} + +func groupQualifiedName(name, group string) string { + apiResource := metav1.APIResource{ + Name: name, + Group: group, + } + + return typeconfig.GroupQualifiedName(apiResource) +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/validation.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/validation.go new file mode 100644 index 000000000..7314904cb --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/enable/validation.go @@ -0,0 +1,260 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enable + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/kubefed/pkg/controller/util" +) + +func federatedTypeValidationSchema(templateSchema map[string]v1beta1.JSONSchemaProps) *v1beta1.CustomResourceValidation { + schema := ValidationSchema(v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "placement": { + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + // References to one or more clusters allow a + // scheduling mechanism to explicitly indicate + // placement. If one or more clusters is provided, + // the clusterSelector field will be ignored. + "clusters": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "name": { + Type: "string", + }, + }, + Required: []string{ + "name", + }, + }, + }, + }, + "clusterSelector": { + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "matchExpressions": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "key": { + Type: "string", + }, + "operator": { + Type: "string", + }, + "values": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + }, + Required: []string{ + "key", + "operator", + }, + }, + }, + }, + "matchLabels": { + Type: "object", + AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + }, + }, + }, + }, + "overrides": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "clusterName": { + Type: "string", + }, + "clusterOverrides": { + Type: "array", + Items: 
&v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "op": { + Type: "string", + Pattern: "^(add|remove|replace)?$", + }, + "path": { + Type: "string", + }, + "value": { + // Supporting the override of an arbitrary field + // precludes up-front validation. Errors in + // the definition of override values will need to + // be caught during propagation. + AnyOf: []v1beta1.JSONSchemaProps{ + { + Type: "string", + }, + { + Type: "integer", + }, + { + Type: "boolean", + }, + { + Type: "object", + }, + { + Type: "array", + }, + }, + }, + }, + Required: []string{ + "path", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }) + if templateSchema != nil { + specProperties := schema.OpenAPIV3Schema.Properties["spec"].Properties + specProperties["template"] = v1beta1.JSONSchemaProps{ + Type: "object", + } + // Add retainReplicas field to types that exposes a replicas + // field that could be targeted by HPA. + if templateSpec, ok := templateSchema["spec"]; ok { + // TODO: find a simpler way to detect that a resource is scalable than having to compute the entire schema. 
+ if replicasField, ok := templateSpec.Properties["replicas"]; ok { + if replicasField.Type == "integer" && replicasField.Format == "int32" { + specProperties[util.RetainReplicasField] = v1beta1.JSONSchemaProps{ + Type: "boolean", + } + } + } + } + + } + return schema +} + +func ValidationSchema(specProps v1beta1.JSONSchemaProps) *v1beta1.CustomResourceValidation { + return &v1beta1.CustomResourceValidation{ + OpenAPIV3Schema: &v1beta1.JSONSchemaProps{ + Properties: map[string]v1beta1.JSONSchemaProps{ + "apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + // TODO(marun) Add a comprehensive schema for metadata + "metadata": { + Type: "object", + }, + "spec": specProps, + "status": { + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "conditions": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "type": { + Type: "string", + }, + "status": { + Type: "string", + }, + "reason": { + Type: "string", + }, + "lastUpdateTime": { + Format: "date-time", + Type: "string", + }, + "lastTransitionTime": { + Format: "date-time", + Type: "string", + }, + }, + Required: []string{ + "type", + "status", + }, + }, + }, + }, + "clusters": { + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: map[string]v1beta1.JSONSchemaProps{ + "name": { + Type: "string", + }, + "status": { + Type: "string", + }, + }, + Required: []string{ + "name", + }, + }, + }, + }, + "observedGeneration": { + Format: "int64", + Type: "integer", + }, + }, + }, + }, + // Require a spec (even if empty) as an aid to users + // manually creating federated configmaps or + // secrets. These target types do not include a spec, + // and the absence of the spec in a federated + // equivalent could indicate a malformed resource. 
+ Required: []string{ + "spec", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/federate.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/federate.go new file mode 100644 index 000000000..df9097714 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/federate.go @@ -0,0 +1,552 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federate + +import ( + "context" + "io" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/klog" + + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/enable" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +const ( + createResourceRetryTimeout = 10 * time.Second + createResourceRetryInterval = 1 * time.Second +) + +var ( + // Controller created resources should always be skipped while federating content + controllerCreatedAPIResourceNames = []string{ + "endpoints", + "events", + "events.events.k8s.io", 
+ "propagatedversions.core.kubefed.io", + } + + federate_long = ` + Federate creates a federated resource from a kubernetes resource. + The target resource must exist in the cluster hosting the kubefed + control plane. If the federated resource needs to be created in the + API, the control plane must have a FederatedTypeConfig for the type + of the kubernetes resource. If using with flag '-o yaml', it is not + necessary for the FederatedTypeConfig to exist (or even for the + kubefed API to be installed in the cluster). + + Current context is assumed to be a Kubernetes cluster hosting + the kubefed control plane. Please use the --host-cluster-context + flag otherwise.` + + federate_example = ` + # Federate resource named "my-cm" in namespace "my-ns" of kubernetes type "configmaps" (identified by short name "cm") + kubefedctl federate cm "my-cm" -n "my-ns" --host-cluster-context=cluster1` +) + +type federateResource struct { + options.GlobalSubcommandOptions + typeName string + resourceName string + resourceNamespace string + output string + outputYAML bool + enableType bool + federateContents bool + filename string + skipAPIResourceNames []string +} + +func (j *federateResource) Bind(flags *pflag.FlagSet) { + flags.StringVarP(&j.resourceNamespace, "namespace", "n", "", "The namespace of the resource to federate.") + flags.StringVarP(&j.output, "output", "o", "", "If provided, the resource that would be created in the API by the command is instead output to stdout in the provided format. Valid format is ['yaml'].") + flags.BoolVarP(&j.enableType, "enable-type", "t", false, "If true, attempt to enable federation of the API type of the resource before creating the federated resource.") + flags.BoolVarP(&j.federateContents, "contents", "c", false, "Applicable only to namespaces. 
If provided, the command will federate all resources within the namespace after federating the namespace.") + flags.StringVarP(&j.filename, "filename", "f", "", "If specified, the provided yaml file will be used as the input for target resources to federate. This mode will only emit federated resource yaml to standard output. Other flag options if provided will be ignored.") + flags.StringSliceVarP(&j.skipAPIResourceNames, "skip-api-resources", "s", []string{}, "Comma separated names of the api resources to skip when federating contents in a namespace. Name could be short name "+ + "(e.g. 'deploy), kind (e.g. 'deployment'), plural name (e.g. 'deployments'), group qualified plural name (e.g. 'deployments.apps') or group name itself (e.g. 'apps') to skip the whole group.") +} + +// Complete ensures that options are valid. +func (j *federateResource) Complete(args []string) error { + if j.output == "yaml" { + j.outputYAML = true + } else if len(j.output) > 0 { + return errors.Errorf("Invalid value for --output: %s", j.output) + } + + if len(j.filename) > 0 { + if len(args) > 0 { + return errors.Errorf("Flag '--filename' does not take any args. Got args: %v", args) + } + return nil + } + + if len(args) == 0 { + return errors.New("TYPE-NAME is required") + } + j.typeName = args[0] + + if len(args) == 1 { + return errors.New("RESOURCE-NAME is required") + } + j.resourceName = args[1] + + if j.enableType && j.outputYAML { + return errors.New("Flag '--enable-type' cannot be used with '--output [yaml]'") + } + + return nil +} + +// NewCmdFederateResource defines the `federate` command that federates a +// Kubernetes resource of the given kubernetes type. 
+func NewCmdFederateResource(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &federateResource{} + + cmd := &cobra.Command{ + Use: "federate TYPE-NAME RESOURCE-NAME", + Short: "Federate creates a federated resource from a kubernetes resource", + Long: federate_long, + Example: federate_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.Run(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + opts.Bind(flags) + + return cmd +} + +// Run is the implementation of the `federate resource` command. +func (j *federateResource) Run(cmdOut io.Writer, config util.FedConfig) error { + + if len(j.resourceNamespace) == 0 { + var err error + j.resourceNamespace, err = util.GetNamespace(j.HostClusterContext, j.Kubeconfig, config) + if err != nil { + return err + } + } + + hostConfig, err := config.HostConfig(j.HostClusterContext, j.Kubeconfig) + if err != nil { + return errors.Wrap(err, "Failed to get host cluster config") + } + + if len(j.filename) > 0 { + resources, err := DecodeUnstructuredFromFile(j.filename) + if err != nil { + return errors.Wrapf(err, "Failed to load yaml from file %q", j.filename) + } + federatedResources, err := FederateResources(resources) + if err != nil { + return err + } + + err = WriteUnstructuredObjsToYaml(federatedResources, cmdOut) + if err != nil { + return errors.Wrap(err, "Failed to write federated resources to YAML") + } + return nil + + } + + qualifiedResourceName := ctlutil.QualifiedName{ + Namespace: j.resourceNamespace, + Name: j.resourceName, + } + artifacts, err := GetFederateArtifacts(hostConfig, j.typeName, j.KubeFedNamespace, qualifiedResourceName, j.enableType, j.outputYAML) + if err != nil { + return err + } + artifactsList := []*FederateArtifacts{} + artifactsList = append(artifactsList, artifacts) + + kind := 
artifacts.typeConfig.GetTargetType().Kind + if kind != ctlutil.NamespaceKind && j.federateContents { + return errors.New("Flag '--contents' can only be used with type 'namespaces'.") + } + + if kind == ctlutil.NamespaceKind && j.federateContents { + containedArtifactsList, err := GetContainedArtifactsList(hostConfig, j.resourceName, j.KubeFedNamespace, j.skipAPIResourceNames, j.enableType, j.outputYAML) + if err != nil { + return err + } + artifactsList = append(artifactsList, containedArtifactsList...) + } + + if j.outputYAML { + for _, artifacts := range artifactsList { + err := WriteUnstructuredObjsToYaml(artifacts.federatedResources, cmdOut) + if err != nil { + return errors.Wrap(err, "Failed to write federated resource to YAML") + } + } + return nil + } + + return CreateResources(cmdOut, hostConfig, artifactsList, j.KubeFedNamespace, j.enableType, j.DryRun) +} + +func FederateResources(resources []*unstructured.Unstructured) ([]*unstructured.Unstructured, error) { + var federatedResources []*unstructured.Unstructured + for _, targetResource := range resources { + // A Group, a Version and a Kind is sufficient for API Resource definition. + gvk := targetResource.GroupVersionKind() + + // Construct an API Resource from above info. + // TODO(irfanurrehman) Should we depend on the lookup from the + // API Server instead, for some specific scenario? 
+ plural, singular := apimeta.UnsafeGuessKindToResource(gvk) + apiResource := metav1.APIResource{ + Name: plural.Resource, + SingularName: singular.Resource, + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind, + } + apiResource.Namespaced = targetResource.GetNamespace() == "" + + qualifiedName := ctlutil.NewQualifiedName(targetResource) + typeConfig := enable.GenerateTypeConfigForTarget(apiResource, enable.NewEnableTypeDirective()) + federatedResource, err := FederatedResourceFromTargetResource(typeConfig, targetResource) + if err != nil { + return nil, errors.Wrapf(err, "Error getting %s from %s %q", typeConfig.GetFederatedType().Kind, typeConfig.GetTargetType().Kind, qualifiedName) + } + + federatedResources = append(federatedResources, federatedResource) + } + + return federatedResources, nil +} + +type FederateArtifacts struct { + // Identifies if typeConfig for this type is installed + typeConfigInstalled bool + + // Identifies the type + typeConfig typeconfig.Interface + // List of federated resources of this type + federatedResources []*unstructured.Unstructured +} + +func GetFederateArtifacts(hostConfig *rest.Config, typeName, kubefedNamespace string, qualifiedName ctlutil.QualifiedName, enableType, outputYAML bool) (*FederateArtifacts, error) { + // Lookup kubernetes API availability + apiResource, err := enable.LookupAPIResource(hostConfig, typeName, "") + if err != nil { + return nil, errors.Wrapf(err, "Failed to find target API resource %s", typeName) + } + klog.V(2).Infof("API Resource for %s found", typeName) + + typeConfigInstalled, typeConfig, err := getTypeConfig(hostConfig, *apiResource, kubefedNamespace, enableType, outputYAML) + if err != nil { + return nil, err + } + + targetResource, err := getTargetResource(hostConfig, typeConfig, qualifiedName) + if err != nil { + return nil, err + } + + federatedResource, err := FederatedResourceFromTargetResource(typeConfig, targetResource) + if err != nil { + return nil, errors.Wrapf(err, "Error 
getting %s from %s %q", typeConfig.GetFederatedType().Kind, typeConfig.GetTargetType().Kind, qualifiedName) + } + + var federatedResources []*unstructured.Unstructured + federatedResources = append(federatedResources, federatedResource) + return &FederateArtifacts{ + typeConfigInstalled: typeConfigInstalled, + typeConfig: typeConfig, + federatedResources: federatedResources, + }, nil +} + +func getTypeConfig(hostConfig *rest.Config, apiResource metav1.APIResource, kubefedNamespace string, enableType, outputYAML bool) (bool, typeconfig.Interface, error) { + resolvedTypeName := typeconfig.GroupQualifiedName(apiResource) + installedTypeConfig, err := getInstalledTypeConfig(hostConfig, resolvedTypeName, kubefedNamespace) + if err == nil { + return true, installedTypeConfig, nil + } + notFound := apierrors.IsNotFound(err) + if notFound && !outputYAML && !enableType { + return false, nil, errors.Errorf("%v. Consider using '--enable-type' to optionally enable type while federating the resource", err) + } + + generatedTypeConfig := enable.GenerateTypeConfigForTarget(apiResource, enable.NewEnableTypeDirective()) + if notFound && enableType { // We have already generated typeConfig to additionally enable type + return false, generatedTypeConfig, nil + } + if outputYAML { // Output as yaml does not bother what error happened while accessing typeConfig + klog.V(1).Infof("Falling back to a generated type config due to lookup failure: %v", err) + return false, generatedTypeConfig, nil + } + return false, nil, err +} + +func getInstalledTypeConfig(hostConfig *rest.Config, typeName, kubefedNamespace string) (typeconfig.Interface, error) { + client, err := genericclient.New(hostConfig) + if err != nil { + return nil, errors.Wrap(err, "Failed to get generic client") + } + + concreteTypeConfig := &fedv1b1.FederatedTypeConfig{} + err = client.Get(context.TODO(), concreteTypeConfig, kubefedNamespace, typeName) + if err != nil { + return nil, err + } + return concreteTypeConfig, nil +} 
+ +func getTargetResource(hostConfig *rest.Config, typeConfig typeconfig.Interface, qualifiedName ctlutil.QualifiedName) (*unstructured.Unstructured, error) { + targetAPIResource := typeConfig.GetTargetType() + targetClient, err := ctlutil.NewResourceClient(hostConfig, &targetAPIResource) + if err != nil { + return nil, errors.Wrapf(err, "Error creating client for %s", targetAPIResource.Kind) + } + + kind := targetAPIResource.Kind + resource, err := targetClient.Resources(qualifiedName.Namespace).Get(qualifiedName.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "Error retrieving target %s %q", kind, qualifiedName) + } + + klog.V(2).Infof("Target %s %q found", kind, qualifiedName) + return resource, nil +} + +func FederatedResourceFromTargetResource(typeConfig typeconfig.Interface, resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { + fedAPIResource := typeConfig.GetFederatedType() + targetResource := resource.DeepCopy() + + targetKind := typeConfig.GetTargetType().Kind + + // Special handling is needed for some controller set fields. 
+ switch targetKind { + case ctlutil.NamespaceKind: + { + unstructured.RemoveNestedField(targetResource.Object, "spec", "finalizers") + } + case ctlutil.ServiceAccountKind: + { + unstructured.RemoveNestedField(targetResource.Object, ctlutil.SecretsField) + } + case ctlutil.ServiceKind: + { + var targetPorts []interface{} + targetPorts, ok, err := unstructured.NestedSlice(targetResource.Object, "spec", "ports") + if err != nil { + return nil, err + } + if ok { + for index := range targetPorts { + port := targetPorts[index].(map[string]interface{}) + delete(port, "nodePort") + targetPorts[index] = port + } + err := unstructured.SetNestedSlice(targetResource.Object, targetPorts, "spec", "ports") + if err != nil { + return nil, err + } + } + unstructured.RemoveNestedField(targetResource.Object, "spec", "clusterIP") + } + } + + qualifiedName := ctlutil.NewQualifiedName(targetResource) + resourceNamespace := getNamespace(typeConfig, qualifiedName) + fedResource := &unstructured.Unstructured{} + SetBasicMetaFields(fedResource, fedAPIResource, qualifiedName.Name, resourceNamespace, "") + + // Warn if annotations are present in case the intention is to + // define annotations in the template of the federated resource. 
+ annotations, _, err := unstructured.NestedMap(targetResource.Object, "metadata", "annotations") + if err != nil { + return nil, errors.Wrap(err, "Failed to retrieve metadata.annotations") + } + if len(annotations) > 0 { + klog.Warningf("Annotations defined for %s %q will not appear in the template of the federated resource: %v", targetKind, qualifiedName, annotations) + } + + if err := RemoveUnwantedFields(targetResource); err != nil { + return nil, err + } + + err = unstructured.SetNestedField(fedResource.Object, targetResource.Object, ctlutil.SpecField, ctlutil.TemplateField) + if err != nil { + return nil, err + } + err = unstructured.SetNestedStringMap(fedResource.Object, map[string]string{}, ctlutil.SpecField, ctlutil.PlacementField, ctlutil.ClusterSelectorField, ctlutil.MatchLabelsField) + if err != nil { + return nil, err + } + + return fedResource, err +} + +func getNamespace(typeConfig typeconfig.Interface, qualifiedName ctlutil.QualifiedName) string { + if typeConfig.GetTargetType().Kind == ctlutil.NamespaceKind { + return qualifiedName.Name + } + return qualifiedName.Namespace +} + +func CreateResources(cmdOut io.Writer, hostConfig *rest.Config, artifactsList []*FederateArtifacts, namespace string, enableType, dryRun bool) error { + for _, artifacts := range artifactsList { + if enableType && !artifacts.typeConfigInstalled { + enableTypeDirective := enable.NewEnableTypeDirective() + enableTypeDirective.Name = artifacts.typeConfig.GetObjectMeta().Name + typeResources, err := enable.GetResources(hostConfig, enableTypeDirective) + if err != nil { + return err + } + err = enable.CreateResources(cmdOut, hostConfig, typeResources, namespace, dryRun) + if err != nil { + return err + } + } + + err := CreateFederatedResources(hostConfig, artifacts.typeConfig, artifacts.federatedResources, dryRun) + if err != nil { + return err + } + } + + return nil +} + +func CreateFederatedResources(hostConfig *rest.Config, typeConfig typeconfig.Interface, federatedResources 
[]*unstructured.Unstructured, dryRun bool) error { + for _, federatedResource := range federatedResources { + err := CreateFederatedResource(hostConfig, typeConfig, federatedResource, dryRun) + if err != nil { + return err + } + } + + return nil +} + +func CreateFederatedResource(hostConfig *rest.Config, typeConfig typeconfig.Interface, federatedResource *unstructured.Unstructured, dryRun bool) error { + if typeConfig.GetTargetType().Kind == ctlutil.NamespaceKind { + // TODO: irfanurrehman: Can a target namespace be federated into another namespace? + klog.Infof("Resource to federate is a namespace. Given namespace will itself be the container for the federated namespace") + } + + fedAPIResource := typeConfig.GetFederatedType() + fedKind := fedAPIResource.Kind + fedClient, err := ctlutil.NewResourceClient(hostConfig, &fedAPIResource) + if err != nil { + return errors.Wrapf(err, "Error creating client for %s", fedKind) + } + + qualifiedFedName := ctlutil.NewQualifiedName(federatedResource) + if !dryRun { + // It might take a little while for the federated type to appear if the + // same is being enabled while or immediately before federating the resource. 
+ err = wait.PollImmediate(createResourceRetryInterval, createResourceRetryTimeout, func() (bool, error) { + _, err := fedClient.Resources(federatedResource.GetNamespace()).Create(federatedResource, metav1.CreateOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil + }) + if err != nil { + return errors.Wrapf(err, "Error creating federated resource %q", qualifiedFedName) + } + } + + klog.Infof("Successfully created %s %q from %s", fedKind, qualifiedFedName, typeConfig.GetTargetType().Kind) + return nil +} + +func GetContainedArtifactsList(hostConfig *rest.Config, containerNamespace, kubefedNamespace string, skipAPIResourceNames []string, enableType, outputYAML bool) ([]*FederateArtifacts, error) { + targetResourcesList, err := getResourcesInNamespace(hostConfig, containerNamespace, skipAPIResourceNames) + if err != nil { + return nil, err + } + + artifactsList := []*FederateArtifacts{} + for _, targetResources := range targetResourcesList { + apiResource := targetResources.apiResource + typeConfigInstalled, typeConfig, err := getTypeConfig(hostConfig, apiResource, kubefedNamespace, enableType, outputYAML) + if err != nil { + return nil, err + } + var federatedResources []*unstructured.Unstructured + for _, targetResource := range targetResources.resources { + federatedResource, err := FederatedResourceFromTargetResource(typeConfig, targetResource) + if err != nil { + return nil, err + } + + federatedResources = append(federatedResources, federatedResource) + } + federateArtifacts := FederateArtifacts{ + typeConfigInstalled: typeConfigInstalled, + typeConfig: typeConfig, + federatedResources: federatedResources, + } + artifactsList = append(artifactsList, &federateArtifacts) + } + + return artifactsList, nil +} + +func WriteUnstructuredObjsToYaml(unstructuredObjs []*unstructured.Unstructured, w io.Writer) error { + for _, unstructuredObj := range unstructuredObjs { + if _, err := 
w.Write([]byte("---\n")); err != nil { + return errors.Wrap(err, "Error encoding object to yaml") + } + err := util.WriteUnstructuredToYaml(unstructuredObj, w) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/util.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/util.go new file mode 100644 index 000000000..d1518552e --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/federate/util.go @@ -0,0 +1,264 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federate + +import ( + "bufio" + "io" + "os" + + "github.com/pkg/errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" + versionhelper "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/rest" + "k8s.io/klog" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/enable" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +func RemoveUnwantedFields(resource *unstructured.Unstructured) error { + unstructured.RemoveNestedField(resource.Object, "apiVersion") + unstructured.RemoveNestedField(resource.Object, "kind") + unstructured.RemoveNestedField(resource.Object, "status") + + // All metadata fields save labels should be cleared. Other + // metadata fields will be set by the system on creation or + // subsequently by controllers. 
+ labels, _, err := unstructured.NestedMap(resource.Object, "metadata", "labels") + if err != nil { + return errors.Wrap(err, "Failed to retrieve metadata.labels") + } + unstructured.RemoveNestedField(resource.Object, "metadata") + if len(labels) > 0 { + err := unstructured.SetNestedMap(resource.Object, labels, "metadata", "labels") + if err != nil { + return errors.Wrap(err, "Failed to set metadata.labels") + } + } + + return nil +} + +func SetBasicMetaFields(resource *unstructured.Unstructured, apiResource metav1.APIResource, name, namespace, generateName string) { + resource.SetKind(apiResource.Kind) + gv := schema.GroupVersion{Group: apiResource.Group, Version: apiResource.Version} + resource.SetAPIVersion(gv.String()) + resource.SetName(name) + if generateName != "" { + resource.SetGenerateName(generateName) + } + if apiResource.Namespaced { + resource.SetNamespace(namespace) + } +} + +func namespacedAPIResourceMap(config *rest.Config, skipAPIResourceNames []string) (map[string]metav1.APIResource, error) { + apiResourceLists, err := enable.GetServerPreferredResources(config) + if err != nil { + return nil, err + } + + apiResources := make(map[string]metav1.APIResource) + for _, apiResourceList := range apiResourceLists { + if len(apiResourceList.APIResources) == 0 { + continue + } + + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + return nil, errors.Wrap(err, "Error parsing GroupVersion") + } + + group := gv.Group + if apiResourceGroupMatchesSkipName(skipAPIResourceNames, group) { + // A whole group is skipped by the user + continue + } + + if group == "extensions" { + // The strategy involved to choose a Group higher in order for k8s core + // APIs is to consider "extensions" as the outdated group [This seems to + // be true for all k8s APIResources, so far]. For example if "deployments" + // exists in "extensions" and "apps"; "deployments.apps" will be chosen. 
+ // This doesn't apply to events but events are listed in + // controllerCreatedAPIResourceNames and so are skipped always. + + // Skipping this also assumes that "extensions" is not the only + // group exposed for this resource on the API Server, which probably + // is safe as "extensions" is deprecated. + // TODO(irfanurrehman): Document this. + continue + } + + for _, apiResource := range apiResourceList.APIResources { + if !apiResource.Namespaced || util.IsFederatedAPIResource(apiResource.Kind, group) || + apiResourceMatchesSkipName(apiResource, skipAPIResourceNames, group) { + continue + } + + // For all other resources (say CRDs) same kinds in different groups + // are treated as individual types. If there happens to be an API Resource + // which enables conversion and allows query of the same resource across + // different groups, a specific group resource will have to be chosen by + // the user using --skip-names to skip the not chosen one(s). + // TODO(irfanurrehman): Document this. + + // The individual apiResources do not have the group and version set + apiResource.Group = group + apiResource.Version = gv.Version + groupQualifiedName := typeconfig.GroupQualifiedName(apiResource) + if previousAPIResource, ok := apiResources[groupQualifiedName]; ok { + if versionhelper.CompareKubeAwareVersionStrings(gv.Version, previousAPIResource.Version) <= 0 { + // The newer version is not latest keep the previous. + continue + } + } + + apiResources[groupQualifiedName] = apiResource + } + } + + return apiResources, nil +} + +func apiResourceGroupMatchesSkipName(skipAPIResourceNames []string, group string) bool { + for _, name := range skipAPIResourceNames { + if name == "" { + continue + } + if name == group { + return true + } + } + return false +} + +func apiResourceMatchesSkipName(apiResource metav1.APIResource, skipAPIResourceNames []string, group string) bool { + names := append(controllerCreatedAPIResourceNames, skipAPIResourceNames...) 
+ for _, name := range names { + if name == "" { + continue + } + if enable.NameMatchesResource(name, apiResource, group) { + return true + } + } + return false +} + +// resources stores a list of resources for an api type +type resources struct { + // resource type information + apiResource metav1.APIResource + // resource list + resources []*unstructured.Unstructured +} + +func getResourcesInNamespace(config *rest.Config, namespace string, skipAPIResourceNames []string) ([]resources, error) { + apiResources, err := namespacedAPIResourceMap(config, skipAPIResourceNames) + if err != nil { + return nil, err + } + + resourcesInNamespace := []resources{} + for _, apiResource := range apiResources { + client, err := ctlutil.NewResourceClient(config, &apiResource) + if err != nil { + return nil, errors.Wrapf(err, "Error creating client for %s", apiResource.Kind) + } + + resourceList, err := client.Resources(namespace).List(metav1.ListOptions{}) + if apierrors.IsNotFound(err) || resourceList == nil { + continue + } + if err != nil { + return nil, errors.Wrapf(err, "Error listing resources for %s", apiResource.Kind) + } + + // It would be a waste of cycles to iterate through empty slices while federating resource + if len(resourceList.Items) == 0 { + continue + } + + targetResources := resources{apiResource: apiResource} + for _, item := range resourceList.Items { + resource := item + errors := validation.IsDNS1123Subdomain(resource.GetName()) + if len(errors) == 0 { + targetResources.resources = append(targetResources.resources, &resource) + } else { + klog.Warningf("Skipping resource %s of type %s because it does not conform to the DNS-1123 subdomain spec.", resource.GetName(), apiResource.Name) + klog.Warningf("The following error(s) were reported during DNS-1123 validation: ") + for _, err := range errors { + klog.Warningf(err) + } + } + } + resourcesInNamespace = append(resourcesInNamespace, targetResources) + } + + return resourcesInNamespace, nil +} + +// 
DecodeUnstructuredFromFile reads a list of yamls into a slice of unstructured objects +func DecodeUnstructuredFromFile(filename string) ([]*unstructured.Unstructured, error) { + var f *os.File + if filename == "-" { + f = os.Stdin + } else { + var err error + f, err = os.Open(filename) + + if err != nil { + return nil, err + } + } + defer f.Close() + + var unstructuredList []*unstructured.Unstructured + reader := utilyaml.NewYAMLReader(bufio.NewReader(f)) + for { + unstructuedObj := &unstructured.Unstructured{} + // Read one YAML document at a time, until io.EOF is returned + buf, err := reader.Read() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + if len(buf) == 0 { + break + } + if err := yaml.Unmarshal(buf, unstructuedObj); err != nil { + return nil, err + } + + unstructuredList = append(unstructuredList, unstructuedObj) + } + + return unstructuredList, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/join.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/join.go new file mode 100644 index 000000000..27d0e60e1 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/join.go @@ -0,0 +1,880 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package kubefedctl + +import ( + "context" + goerrors "errors" + "io" + "reflect" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +const ( + serviceAccountSecretTimeout = 30 * time.Second +) + +var ( + join_long = ` + Join registers a Kubernetes cluster with a KubeFed control + plane. + + Current context is assumed to be a Kubernetes cluster + hosting a KubeFed control plane. Please use the + --host-cluster-context flag otherwise.` + join_example = ` + # Register a cluster with a KubeFed control plane by + # specifying the cluster name and the context name of + # the control plane's host cluster. Cluster name must + # be a valid RFC 1123 subdomain name. Cluster context + # must be specified if the cluster name is different + # than the cluster's context in the local kubeconfig. + kubefedctl join foo --host-cluster-context=bar` + + // Policy rules allowing full access to resources in the cluster + // or namespace. 
+ namespacedPolicyRules = []rbacv1.PolicyRule{ + { + Verbs: []string{rbacv1.VerbAll}, + APIGroups: []string{rbacv1.APIGroupAll}, + Resources: []string{rbacv1.ResourceAll}, + }, + } + clusterPolicyRules = []rbacv1.PolicyRule{ + namespacedPolicyRules[0], + { + NonResourceURLs: []string{rbacv1.NonResourceAll}, + Verbs: []string{"get"}, + }, + } +) + +type joinFederation struct { + options.GlobalSubcommandOptions + options.CommonJoinOptions + joinFederationOptions +} + +type joinFederationOptions struct { + secretName string + scope apiextv1b1.ResourceScope + errorOnExisting bool +} + +// Bind adds the join specific arguments to the flagset passed in as an +// argument. +func (o *joinFederationOptions) Bind(flags *pflag.FlagSet) { + flags.StringVar(&o.secretName, "secret-name", "", + "Name of the secret where the cluster's credentials will be stored in the host cluster. This name should be a valid RFC 1035 label. If unspecified, defaults to a generated name containing the cluster name.") + flags.BoolVar(&o.errorOnExisting, "error-on-existing", true, + "Whether the join operation will throw an error if it encounters existing artifacts with the same name as those it's trying to create. If false, the join operation will update existing artifacts to match its own specification.") +} + +// NewCmdJoin defines the `join` command that registers a cluster with +// a KubeFed control plane. 
+func NewCmdJoin(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &joinFederation{} + + cmd := &cobra.Command{ + Use: "join CLUSTER_NAME --host-cluster-context=HOST_CONTEXT", + Short: "Register a cluster with a KubeFed control plane", + Long: join_long, + Example: join_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.Run(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + opts.CommonSubcommandBind(flags) + opts.Bind(flags) + + return cmd +} + +// Complete ensures that options are valid and marshals them if necessary. +func (j *joinFederation) Complete(args []string) error { + err := j.SetName(args) + if err != nil { + return err + } + + if j.ClusterContext == "" { + klog.V(2).Infof("Defaulting cluster context to joining cluster name %s", j.ClusterName) + j.ClusterContext = j.ClusterName + } + + if j.HostClusterName != "" && strings.ContainsAny(j.HostClusterName, ":/") { + return goerrors.New("host-cluster-name may not contain \"/\" or \":\"") + } + + if j.HostClusterName == "" && strings.ContainsAny(j.HostClusterContext, ":/") { + klog.Fatal("host-cluster-name must be set if the name of the host cluster context contains one of \":\" or \"/\"") + } + + klog.V(2).Infof("Args and flags: name %s, host: %s, host-system-namespace: %s, kubeconfig: %s, cluster-context: %s, secret-name: %s, dry-run: %v", + j.ClusterName, j.HostClusterContext, j.KubeFedNamespace, j.Kubeconfig, j.ClusterContext, + j.secretName, j.DryRun) + + return nil +} + +// Run is the implementation of the `join` command. 
+func (j *joinFederation) Run(cmdOut io.Writer, config util.FedConfig) error { + hostClientConfig := config.GetClientConfig(j.HostClusterContext, j.Kubeconfig) + if err := j.SetHostClusterContextFromConfig(hostClientConfig); err != nil { + return err + } + + hostConfig, err := hostClientConfig.ClientConfig() + if err != nil { + // TODO(font): Return new error with this same text so it can be output + // by caller. + klog.V(2).Infof("Failed to get host cluster config: %v", err) + return err + } + + j.scope, err = options.GetScopeFromKubeFedConfig(hostConfig, j.KubeFedNamespace) + if err != nil { + return err + } + + clusterConfig, err := config.ClusterConfig(j.ClusterContext, j.Kubeconfig) + if err != nil { + klog.V(2).Infof("Failed to get joining cluster config: %v", err) + return err + } + + hostClusterName := j.HostClusterContext + if j.HostClusterName != "" { + hostClusterName = j.HostClusterName + } + + _, err = JoinCluster(hostConfig, clusterConfig, j.KubeFedNamespace, + hostClusterName, j.ClusterName, j.secretName, j.scope, j.DryRun, j.errorOnExisting) + + return err +} + +// JoinCluster registers a cluster with a KubeFed control plane. The +// KubeFed namespace in the joining cluster will be the same as in the +// host cluster. +func JoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, + hostClusterName, joiningClusterName, secretName string, + scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) { + + return joinClusterForNamespace(hostConfig, clusterConfig, kubefedNamespace, + kubefedNamespace, hostClusterName, joiningClusterName, secretName, + scope, dryRun, errorOnExisting) +} + +// joinClusterForNamespace registers a cluster with a KubeFed control +// plane. The KubeFed namespace in the joining cluster is provided by +// the joiningNamespace parameter. 
+func joinClusterForNamespace(hostConfig, clusterConfig *rest.Config, kubefedNamespace, + joiningNamespace, hostClusterName, joiningClusterName, secretName string, + scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) { + + hostClientset, err := util.HostClientset(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get host cluster clientset: %v", err) + return nil, err + } + + clusterClientset, err := util.ClusterClientset(clusterConfig) + if err != nil { + klog.V(2).Infof("Failed to get joining cluster clientset: %v", err) + return nil, err + } + + client, err := genericclient.New(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get kubefed clientset: %v", err) + return nil, err + } + + klog.V(2).Infof("Performing preflight checks.") + err = performPreflightChecks(clusterClientset, joiningClusterName, hostClusterName, joiningNamespace, errorOnExisting) + if err != nil { + return nil, err + } + + klog.V(2).Infof("Creating %s namespace in joining cluster", joiningNamespace) + _, err = createKubeFedNamespace(clusterClientset, joiningNamespace, + joiningClusterName, dryRun) + if err != nil { + klog.V(2).Infof("Error creating %s namespace in joining cluster: %v", + joiningNamespace, err) + return nil, err + } + klog.V(2).Infof("Created %s namespace in joining cluster", joiningNamespace) + + saName, err := createAuthorizedServiceAccount(clusterClientset, + joiningNamespace, joiningClusterName, hostClusterName, + scope, dryRun, errorOnExisting) + if err != nil { + return nil, err + } + + secret, caBundle, err := populateSecretInHostCluster(clusterClientset, hostClientset, + saName, kubefedNamespace, joiningNamespace, joiningClusterName, secretName, dryRun) + if err != nil { + klog.V(2).Infof("Error creating secret in host cluster: %s due to: %v", hostClusterName, err) + return nil, err + } + + var disabledTLSValidations []fedv1b1.TLSValidation + if clusterConfig.TLSClientConfig.Insecure { + 
disabledTLSValidations = append(disabledTLSValidations, fedv1b1.TLSAll) + } + + kubefedCluster, err := createKubeFedCluster(client, joiningClusterName, clusterConfig.Host, + secret.Name, kubefedNamespace, caBundle, disabledTLSValidations, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Failed to create federated cluster resource: %v", err) + return nil, err + } + + klog.V(2).Info("Created federated cluster resource") + return kubefedCluster, nil +} + +// This function is exported for testing purposes only. +var TestOnly_JoinClusterForNamespace = joinClusterForNamespace + +// performPreflightChecks checks that the host and joining clusters are in +// a consistent state. +func performPreflightChecks(clusterClientset kubeclient.Interface, name, hostClusterName, + kubefedNamespace string, errorOnExisting bool) error { + // Make sure there is no existing service account in the joining cluster. + saName := util.ClusterServiceAccountName(name, hostClusterName) + _, err := clusterClientset.CoreV1().ServiceAccounts(kubefedNamespace).Get(saName, + metav1.GetOptions{}) + + switch { + case apierrors.IsNotFound(err): + return nil + case err != nil: + return err + case errorOnExisting: + return errors.Errorf("service account: %s already exists in joining cluster: %s", saName, name) + default: + klog.V(2).Infof("Service account %s already exists in joining cluster %s", saName, name) + return nil + } +} + +// createKubeFedCluster creates a federated cluster resource that associates +// the cluster and secret. 
+func createKubeFedCluster(client genericclient.Client, joiningClusterName, apiEndpoint, + secretName, kubefedNamespace string, caBundle []byte, disabledTLSValidations []fedv1b1.TLSValidation, + dryRun, errorOnExisting bool) (*fedv1b1.KubeFedCluster, error) { + fedCluster := &fedv1b1.KubeFedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: kubefedNamespace, + Name: joiningClusterName, + }, + Spec: fedv1b1.KubeFedClusterSpec{ + APIEndpoint: apiEndpoint, + CABundle: caBundle, + SecretRef: fedv1b1.LocalSecretReference{ + Name: secretName, + }, + DisabledTLSValidations: disabledTLSValidations, + }, + } + + if dryRun { + return fedCluster, nil + } + + existingFedCluster := &fedv1b1.KubeFedCluster{} + err := client.Get(context.TODO(), existingFedCluster, kubefedNamespace, joiningClusterName) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve federated cluster %s due to %v", joiningClusterName, err) + return nil, err + case err == nil && errorOnExisting: + return nil, errors.Errorf("federated cluster %s already exists in host cluster", joiningClusterName) + case err == nil: + existingFedCluster.Spec = fedCluster.Spec + err = client.Update(context.TODO(), existingFedCluster) + if err != nil { + klog.V(2).Infof("Could not update federated cluster %s due to %v", fedCluster.Name, err) + return nil, err + } + return existingFedCluster, nil + default: + err = client.Create(context.TODO(), fedCluster) + if err != nil { + klog.V(2).Infof("Could not create federated cluster %s due to %v", fedCluster.Name, err) + return nil, err + } + return fedCluster, nil + } +} + +// createKubeFedNamespace creates the kubefed namespace in the cluster +// associated with clusterClientset, if it doesn't already exist. 
+func createKubeFedNamespace(clusterClientset kubeclient.Interface, kubefedNamespace, + joiningClusterName string, dryRun bool) (*corev1.Namespace, error) { + fedNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubefedNamespace, + }, + } + + if dryRun { + return fedNamespace, nil + } + + _, err := clusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + klog.V(2).Infof("Could not get %s namespace: %v", kubefedNamespace, err) + return nil, err + } + + if err == nil { + klog.V(2).Infof("Already existing %s namespace", kubefedNamespace) + return fedNamespace, nil + } + + // Not found, so create. + _, err = clusterClientset.CoreV1().Namespaces().Create(fedNamespace) + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.V(2).Infof("Could not create %s namespace: %v", kubefedNamespace, err) + return nil, err + } + return fedNamespace, nil +} + +// createAuthorizedServiceAccount creates a service account and grants +// the privileges required by the KubeFed control plane to manage +// resources in the joining cluster. The name of the created service +// account is returned on success. 
+func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface, + namespace, joiningClusterName, hostClusterName string, + scope apiextv1b1.ResourceScope, dryRun, errorOnExisting bool) (string, error) { + + klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName) + + saName, err := createServiceAccount(joiningClusterClientset, namespace, + joiningClusterName, hostClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created service account: %s in joining cluster: %s", saName, joiningClusterName) + + if scope == apiextv1b1.NamespaceScoped { + klog.V(2).Infof("Creating role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating role and binding for service account: %s in joining cluster: %s due to: %v", saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + + klog.V(2).Infof("Creating health check cluster role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createHealthCheckClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, + dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating health check cluster role and binding for service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created health check cluster role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + + } else { + klog.V(2).Infof("Creating cluster 
role and binding for service account: %s in joining cluster: %s", saName, joiningClusterName) + + err = createClusterRoleAndBinding(joiningClusterClientset, saName, namespace, joiningClusterName, dryRun, errorOnExisting) + if err != nil { + klog.V(2).Infof("Error creating cluster role and binding for service account: %s in joining cluster: %s due to: %v", + saName, joiningClusterName, err) + return "", err + } + + klog.V(2).Infof("Created cluster role and binding for service account: %s in joining cluster: %s", + saName, joiningClusterName) + } + + return saName, nil +} + +// createServiceAccount creates a service account in the cluster associated +// with clusterClientset with credentials that will be used by the host cluster +// to access its API server. +func createServiceAccount(clusterClientset kubeclient.Interface, namespace, + joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) { + saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName) + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: saName, + Namespace: namespace, + }, + } + + if dryRun { + return saName, nil + } + + // Create a new service account. 
+ _, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Create(sa) + switch { + case apierrors.IsAlreadyExists(err) && errorOnExisting: + klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName) + return "", err + case err != nil && !apierrors.IsAlreadyExists(err): + klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err) + return "", err + default: + return saName, nil + } +} + +func bindingSubjects(saName, namespace string) []rbacv1.Subject { + return []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: saName, + Namespace: namespace, + }, + } +} + +// createClusterRoleAndBinding creates an RBAC cluster role and +// binding that allows the service account identified by saName to +// access all resources in all namespaces in the cluster associated +// with clientset. +func createClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: clusterPolicyRules, + } + existingRole, err := clientset.RbacV1().ClusterRoles().Get(roleName, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get cluster role for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("cluster role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err := clientset.RbacV1().ClusterRoles().Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) 
+ return err + } + default: // role was not found + _, err := clientset.RbacV1().ClusterRoles().Create(role) + if err != nil { + klog.V(2).Infof("Could not create cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + + // TODO: This should limit its access to only necessary resources. + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + } + existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get cluster role binding for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing cluster role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err := 
clientset.RbacV1().ClusterRoleBindings().Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + return nil +} + +// createRoleAndBinding creates an RBAC role and binding +// that allows the service account identified by saName to access all +// resources in the specified namespace. +func createRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: namespacedPolicyRules, + } + existingRole, err := clientset.RbacV1().Roles(namespace).Get(roleName, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve role for service account %s in joining cluster %s due to %v", saName, clusterName, err) + return err + case errorOnExisting && err == nil: + return errors.Errorf("role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err = clientset.RbacV1().Roles(namespace).Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + default: + _, err := clientset.RbacV1().Roles(namespace).Create(role) + if err != nil { + klog.V(2).Infof("Could not create role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + + 
binding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: roleName, + }, + } + + existingBinding, err := clientset.RbacV1().RoleBindings(namespace).Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not retrieve role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().RoleBindings(namespace).Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding) + if err != nil { + klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err = clientset.RbacV1().RoleBindings(namespace).Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = clientset.RbacV1().RoleBindings(namespace).Create(binding) + if err != nil { + klog.V(2).Infof("Could not create role binding for service account: %s in joining cluster: %s 
due to: %v", + saName, clusterName, err) + return err + } + } + + return nil +} + +// createHealthCheckClusterRoleAndBinding creates an RBAC cluster role and +// binding that allows the service account identified by saName to +// access the health check path of the cluster. +func createHealthCheckClusterRoleAndBinding(clientset kubeclient.Interface, saName, namespace, clusterName string, dryRun, errorOnExisting bool) error { + if dryRun { + return nil + } + + roleName := util.HealthCheckRoleName(saName, namespace) + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"Get"}, + NonResourceURLs: []string{"/healthz"}, + }, + // The cluster client expects to be able to list nodes to retrieve zone and region details. + // TODO(marun) Consider making zone/region retrieval optional + { + Verbs: []string{"list"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + } + existingRole, err := clientset.RbacV1().ClusterRoles().Get(role.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get health check cluster role for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("health check cluster role for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + existingRole.Rules = role.Rules + _, err := clientset.RbacV1().ClusterRoles().Update(existingRole) + if err != nil { + klog.V(2).Infof("Could not update health check cluster role for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + default: // role was not found + _, err := clientset.RbacV1().ClusterRoles().Create(role) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role for service account: %s in joining cluster: %s due to: %v", + 
saName, clusterName, err) + return err + } + } + + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Subjects: bindingSubjects(saName, namespace), + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + } + existingBinding, err := clientset.RbacV1().ClusterRoleBindings().Get(binding.Name, metav1.GetOptions{}) + switch { + case err != nil && !apierrors.IsNotFound(err): + klog.V(2).Infof("Could not get health check cluster role binding for service account %s in joining cluster %s due to %v", + saName, clusterName, err) + return err + case err == nil && errorOnExisting: + return errors.Errorf("health check cluster role binding for service account %s in joining cluster %s already exists", saName, clusterName) + case err == nil: + // The roleRef cannot be updated, therefore if the existing roleRef is different, the existing rolebinding + // must be deleted and recreated with the correct roleRef + if !reflect.DeepEqual(existingBinding.RoleRef, binding.RoleRef) { + err = clientset.RbacV1().ClusterRoleBindings().Delete(existingBinding.Name, &metav1.DeleteOptions{}) + if err != nil { + klog.V(2).Infof("Could not delete existing health check cluster role binding for service account %s in joining cluster %s due to: %v", + saName, clusterName, err) + return err + } + _, err = clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } else { + existingBinding.Subjects = binding.Subjects + _, err := clientset.RbacV1().ClusterRoleBindings().Update(existingBinding) + if err != nil { + klog.V(2).Infof("Could not update health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + default: + _, err = 
clientset.RbacV1().ClusterRoleBindings().Create(binding) + if err != nil { + klog.V(2).Infof("Could not create health check cluster role binding for service account: %s in joining cluster: %s due to: %v", + saName, clusterName, err) + return err + } + } + return nil +} + +// populateSecretInHostCluster copies the service account secret for saName +// from the cluster referenced by clusterClientset to the client referenced by +// hostClientset, putting it in a secret named secretName in the provided +// namespace. +func populateSecretInHostCluster(clusterClientset, hostClientset kubeclient.Interface, + saName, hostNamespace, joiningNamespace, joiningClusterName, secretName string, + dryRun bool) (*corev1.Secret, []byte, error) { + + klog.V(2).Infof("Creating cluster credentials secret in host cluster") + + if dryRun { + dryRunSecret := &corev1.Secret{} + dryRunSecret.Name = secretName + return dryRunSecret, nil, nil + } + + // Get the secret from the joining cluster. + var secret *corev1.Secret + err := wait.PollImmediate(1*time.Second, serviceAccountSecretTimeout, func() (bool, error) { + sa, err := clusterClientset.CoreV1().ServiceAccounts(joiningNamespace).Get(saName, + metav1.GetOptions{}) + if err != nil { + return false, nil + } + + for _, objReference := range sa.Secrets { + saSecretName := objReference.Name + var err error + secret, err = clusterClientset.CoreV1().Secrets(joiningNamespace).Get(saSecretName, + metav1.GetOptions{}) + if err != nil { + return false, nil + } + if secret.Type == corev1.SecretTypeServiceAccountToken { + klog.V(2).Infof("Using secret named: %s", secret.Name) + return true, nil + } + } + return false, nil + }) + + if err != nil { + klog.V(2).Infof("Could not get service account secret from joining cluster: %v", err) + return nil, nil, err + } + + token, ok := secret.Data[ctlutil.TokenKey] + if !ok { + return nil, nil, errors.Errorf("Key %q not found in service account secret", ctlutil.TokenKey) + } + + // Create a secret in the host 
cluster containing the token. + v1Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: hostNamespace, + }, + Data: map[string][]byte{ + ctlutil.TokenKey: token, + }, + } + + if secretName == "" { + v1Secret.GenerateName = joiningClusterName + "-" + } else { + v1Secret.Name = secretName + } + + v1SecretResult, err := hostClientset.CoreV1().Secrets(hostNamespace).Create(&v1Secret) + if err != nil { + klog.V(2).Infof("Could not create secret in host cluster: %v", err) + return nil, nil, err + } + + // caBundle is optional so no error is suggested if it is not + // found in the secret. + caBundle := secret.Data["ca.crt"] + + klog.V(2).Infof("Created secret in host cluster named: %s", v1SecretResult.Name) + return v1SecretResult, caBundle, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/kubefedctl.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/kubefedctl.go new file mode 100644 index 000000000..9f71cc125 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/kubefedctl.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubefedctl + +import ( + "flag" + "io" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/client-go/tools/clientcmd" + apiserverflag "k8s.io/component-base/cli/flag" + + "sigs.k8s.io/kubefed/pkg/kubefedctl/enable" + "sigs.k8s.io/kubefed/pkg/kubefedctl/federate" + "sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +// NewKubeFedCtlCommand creates the `kubefedctl` command and its nested children. +func NewKubeFedCtlCommand(out io.Writer) *cobra.Command { + // Parent command to which all subcommands are added. + rootCmd := &cobra.Command{ + Use: "kubefedctl", + Short: "kubefedctl controls a Kubernetes Cluster Federation", + Long: "kubefedctl controls a Kubernetes Cluster Federation. Find more information at https://sigs.k8s.io/kubefed.", + + RunE: runHelp, + } + + // Add the command line flags from other dependencies (e.g., klog), but do not + // warn if they contain underscores. + pflag.CommandLine.SetNormalizeFunc(apiserverflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + rootCmd.PersistentFlags().AddFlagSet(pflag.CommandLine) + + // From this point and forward we get warnings on flags that contain "_" separators + rootCmd.SetGlobalNormalizationFunc(apiserverflag.WarnWordSepNormalizeFunc) + + // Prevent klog errors about logging before parsing. 
+ _ = flag.CommandLine.Parse(nil) + + fedConfig := util.NewFedConfig(clientcmd.NewDefaultPathOptions()) + rootCmd.AddCommand(enable.NewCmdTypeEnable(out, fedConfig)) + rootCmd.AddCommand(NewCmdTypeDisable(out, fedConfig)) + rootCmd.AddCommand(federate.NewCmdFederateResource(out, fedConfig)) + rootCmd.AddCommand(NewCmdJoin(out, fedConfig)) + rootCmd.AddCommand(NewCmdUnjoin(out, fedConfig)) + rootCmd.AddCommand(orphaning.NewCmdOrphaning(out, fedConfig)) + rootCmd.AddCommand(NewCmdVersion(out)) + + return rootCmd +} + +func runHelp(cmd *cobra.Command, args []string) error { + return cmd.Help() +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/options/options.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/options/options.go new file mode 100644 index 000000000..41306bfbb --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/options/options.go @@ -0,0 +1,159 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "context" + + "github.com/pkg/errors" + "github.com/spf13/pflag" + + apiextv1b1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + "sigs.k8s.io/kubefed/pkg/controller/util" +) + +// GlobalSubcommandOptions holds the configuration required by the subcommands of +// `kubefedctl`. +type GlobalSubcommandOptions struct { + HostClusterContext string + KubeFedNamespace string + Kubeconfig string + DryRun bool +} + +// GlobalSubcommandBind adds the global subcommand flags to the flagset passed in. +func (o *GlobalSubcommandOptions) GlobalSubcommandBind(flags *pflag.FlagSet) { + flags.StringVar(&o.Kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") + flags.StringVar(&o.HostClusterContext, "host-cluster-context", "", "Host cluster context") + flags.StringVar(&o.KubeFedNamespace, "kubefed-namespace", util.DefaultKubeFedSystemNamespace, + "Namespace in the host cluster where the KubeFed system components are installed. This namespace will also be the target of propagation if the controller manager is running with namespaced scope.") + flags.BoolVar(&o.DryRun, "dry-run", false, + "Run the command in dry-run mode, without making any server requests.") +} + +// SetHostClusterContextFromConfig sets the host cluster context to +// the name of the config context if a value was not provided. +func (o *GlobalSubcommandOptions) SetHostClusterContextFromConfig(config clientcmd.ClientConfig) error { + if len(o.HostClusterContext) > 0 { + return nil + } + currentContext, err := CurrentContext(config) + if err != nil { + return err + } + o.HostClusterContext = currentContext + return nil +} + +// CurrentContext retrieves the current context from the provided config.
+func CurrentContext(config clientcmd.ClientConfig) (string, error) { + rawConfig, err := config.RawConfig() + if err != nil { + return "", errors.Wrap(err, "Failed to get current context from config") + } + return rawConfig.CurrentContext, nil +} + +// CommonJoinOptions holds the common configuration required by the join and +// unjoin subcommands of `kubefedctl`. +type CommonJoinOptions struct { + ClusterName string + ClusterContext string + HostClusterName string +} + +// CommonSubcommandBind adds the common subcommand flags to the flagset passed in. +func (o *CommonJoinOptions) CommonSubcommandBind(flags *pflag.FlagSet) { + flags.StringVar(&o.ClusterContext, "cluster-context", "", + "Name of the cluster's context in the local kubeconfig. Defaults to cluster name if unspecified.") + flags.StringVar(&o.HostClusterName, "host-cluster-name", "", + "If set, overrides the use of host-cluster-context name in resource names created in the target cluster. This option must be used when the context name has characters invalid for kubernetes resources like \"/\" and \":\".") +} + +// SetName sets the name from the args passed in for the required positional +// argument. +func (o *CommonJoinOptions) SetName(args []string) error { + if len(args) == 0 { + return errors.New("NAME is required") + } + + o.ClusterName = args[0] + return nil +} + +func GetScopeFromKubeFedConfig(hostConfig *rest.Config, namespace string) (apiextv1b1.ResourceScope, error) { + client, err := genericclient.New(hostConfig) + if err != nil { + err = errors.Wrap(err, "Failed to get kubefed clientset") + return "", err + } + + fedConfig := &fedv1b1.KubeFedConfig{} + err = client.Get(context.TODO(), fedConfig, namespace, util.KubeFedConfigName) + if apierrors.IsNotFound(err) { + return "", errors.Errorf( + "A KubeFedConfig named %q was not found in namespace %q. 
Is a KubeFed control plane running in this namespace?", + util.KubeFedConfigName, namespace) + } else if err != nil { + config := util.QualifiedName{ + Namespace: namespace, + Name: util.KubeFedConfigName, + } + err = errors.Wrapf(err, "Error retrieving KubeFedConfig %q", config) + return "", err + } + + return fedConfig.Spec.Scope, nil +} + +// CommonEnableOptions holds the common configuration required by the enable +// and disable subcommands of `kubefedctl`. +type CommonEnableOptions struct { + TargetName string + FederatedGroup string + TargetVersion string +} + +// Default values for the federated group and version used by +// the enable and disable subcommands of `kubefedctl`. +const ( + DefaultFederatedGroup = "types.kubefed.io" + DefaultFederatedVersion = "v1beta1" +) + +// CommonSubcommandBind adds the common subcommand flags to the flagset passed in. +func (o *CommonEnableOptions) CommonSubcommandBind(flags *pflag.FlagSet, federatedGroupUsage, targetVersionUsage string) { + flags.StringVar(&o.FederatedGroup, "federated-group", DefaultFederatedGroup, federatedGroupUsage) + flags.StringVar(&o.TargetVersion, "version", "", targetVersionUsage) +} + +// SetName sets the name from the args passed in for the required positional +// argument. +func (o *CommonEnableOptions) SetName(args []string) error { + if len(args) == 0 { + return errors.New("NAME is required") + } + + o.TargetName = args[0] + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/disable.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/disable.go new file mode 100644 index 000000000..c108d9c66 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/disable.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package orphaning + +import ( + "io" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +var ( + orphaning_disable_long = ` + Removes previously added "orphaning enable" ('kubefed.io/orphan: true') + annotation from a federated resource. When the federated resource is subsequently marked for deletion, + the resources it manages in member clusters will be removed before the federated resource is removed. + + Current context is assumed to be a Kubernetes cluster hosting + the kubefed control plane. 
Please use the + --host-cluster-context flag otherwise.` + + orphaning_disable_example = ` + # Disable the orphaning mode for a federated resource of type FederatedDeployment and named foo + kubefedctl orphaning disable FederatedDeployment foo --host-cluster-context=cluster1` +) + +// newCmdDisableOrphaning removes the 'kubefed.io/orphan: true' annotation from the federated resource +func newCmdDisableOrphaning(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &orphanResource{} + cmd := &cobra.Command{ + Use: "disable ", + Short: "Disable orphaning deletion to ensure the removal of managed resources before removing the managing federated resource", + Long: orphaning_disable_long, + Example: orphaning_disable_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.RunDisable(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + err := opts.Bind(flags) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + return cmd +} + +// RunDisable implements the `disable` command. 
+func (o *orphanResource) RunDisable(cmdOut io.Writer, config util.FedConfig) error { + resourceClient, err := o.GetResourceClient(config, cmdOut) + if err != nil { + return err + } + fedResource, err := o.GetFederatedResource(resourceClient) + if err != nil { + return err + } + if !ctlutil.IsOrphaningEnabled(fedResource) { + return nil + } + ctlutil.DisableOrphaning(fedResource) + _, err = resourceClient.Update(fedResource, metav1.UpdateOptions{}) + if err != nil { + return errors.Wrapf(err, "Failed to update resource %s %q", fedResource.GetKind(), + ctlutil.QualifiedName{Name: fedResource.GetName(), Namespace: fedResource.GetNamespace()}) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/enable.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/enable.go new file mode 100644 index 000000000..41d2c09a3 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/enable.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package orphaning + +import ( + "io" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +var ( + orphaning_enable_long = ` + Prevents the removal of managed resources from member clusters when their managing federated + resource is removed. 
This is accomplished by adding 'kubefed.io/orphan: true' as an annotation to the + federated resource. + + Current context is assumed to be a Kubernetes cluster hosting + the kubefed control plane. Please use the + --host-cluster-context flag otherwise.` + + orphan_enable_example = ` + # Enable the orphaning mode for a federated resource of type FederatedDeployment and named foo + kubefedctl orphaning enable FederatedDeployment foo --host-cluster-context=cluster1` +) + +// newCmdEnableOrphaning adds 'kubefed.io/orphan: true' as an annotation to the federated resource +func newCmdEnableOrphaning(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &orphanResource{} + cmd := &cobra.Command{ + Use: "enable ", + Short: "Enable the orphaning (i.e. retention) of resources managed by a federated resource upon its removal.", + Long: orphaning_enable_long, + Example: orphan_enable_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.RunEnable(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + err := opts.Bind(flags) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + return cmd +} + +// RunEnable implements the `enable` command. 
+func (o *orphanResource) RunEnable(cmdOut io.Writer, config util.FedConfig) error { + resourceClient, err := o.GetResourceClient(config, cmdOut) + if err != nil { + return err + } + fedResource, err := o.GetFederatedResource(resourceClient) + if err != nil { + return err + } + if ctlutil.IsOrphaningEnabled(fedResource) { + return nil + } + ctlutil.EnableOrphaning(fedResource) + _, err = resourceClient.Update(fedResource, metav1.UpdateOptions{}) + if err != nil { + return errors.Wrapf(err, "Failed to update resource %s %q", fedResource.GetKind(), + ctlutil.QualifiedName{Name: fedResource.GetName(), Namespace: fedResource.GetNamespace()}) + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/orphaning.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/orphaning.go new file mode 100644 index 000000000..d625ed43d --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/orphaning.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package orphaning + +import ( + "fmt" + "io" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/klog" + + "sigs.k8s.io/kubefed/pkg/apis/core/typeconfig" + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/enable" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +type orphanResource struct { + options.GlobalSubcommandOptions + typeName string + resourceName string + resourceNamespace string +} + +// Bind adds the join specific arguments to the flagset passed in as an argument. +func (o *orphanResource) Bind(flags *pflag.FlagSet) error { + flags.StringVarP(&o.resourceNamespace, "namespace", "n", "", "If present, the namespace scope for this CLI request") + err := flags.MarkHidden("kubefed-namespace") + if err != nil { + return err + } + err = flags.MarkHidden("dry-run") + if err != nil { + return err + } + return nil +} + +// NewCmdOrphaning the head of orphaning-deletion sub commands +func NewCmdOrphaning(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + cmd := &cobra.Command{ + Use: "orphaning-deletion", + Short: "Manage orphaning delete policy", + Long: "Manage orphaning delete policy", + Run: func(cmd *cobra.Command, args []string) { + err := cmd.Help() + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + cmd.AddCommand(newCmdEnableOrphaning(cmdOut, config)) + cmd.AddCommand(newCmdDisableOrphaning(cmdOut, config)) + cmd.AddCommand(newCmdStatusOrphaning(cmdOut, config)) + + return cmd +} + +// Complete ensures that options are valid and marshals them if necessary. 
+func (o *orphanResource) Complete(args []string, config util.FedConfig) error { + if len(args) == 0 { + return errors.New("resource type is required") + } + + o.typeName = args[0] + + if len(args) == 1 { + return errors.New("resource name is required") + } + o.resourceName = args[1] + + if len(o.resourceNamespace) == 0 { + var err error + o.resourceNamespace, err = util.GetNamespace(o.HostClusterContext, o.Kubeconfig, config) + return err + } + return nil +} + +// Returns a Federated Resources Interface +func (o *orphanResource) GetResourceClient(config util.FedConfig, cmdOut io.Writer) (dynamic.ResourceInterface, error) { + hostClientConfig := config.GetClientConfig(o.HostClusterContext, o.Kubeconfig) + if err := o.SetHostClusterContextFromConfig(hostClientConfig); err != nil { + return nil, err + } + hostConfig, err := hostClientConfig.ClientConfig() + if err != nil { + return nil, errors.Wrapf(err, "Unable to load configuration for cluster context %q in kubeconfig %q.`", + o.HostClusterContext, o.Kubeconfig) + } + // Lookup kubernetes API availability + apiResource, err := enable.LookupAPIResource(hostConfig, o.typeName, "") + if err != nil { + return nil, errors.Wrapf(err, "Failed to find targeted %s type", o.typeName) + } + klog.V(2).Infof("API Resource for %s/%s found", typeconfig.GroupQualifiedName(*apiResource), apiResource.Version) + if !util.IsFederatedAPIResource(apiResource.Kind, apiResource.Group) { + fmt.Fprintf(cmdOut, "Warning: %s/%s might not be a federated resource\n", + typeconfig.GroupQualifiedName(*apiResource), apiResource.Version) + } + targetClient, err := ctlutil.NewResourceClient(hostConfig, apiResource) + + if err != nil { + return nil, errors.Wrapf(err, "Error creating client for %s", apiResource.Kind) + } + + resourceClient := targetClient.Resources(o.resourceNamespace) + return resourceClient, nil +} + +// Returns the Federated resource where the orphaning-deletion will be managed +func (o *orphanResource) 
GetFederatedResource(resourceClient dynamic.ResourceInterface) (*unstructured.Unstructured, error) { + resource, err := resourceClient.Get(o.resourceName, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "Failed to retrieve resource: %q", + ctlutil.QualifiedName{Name: o.resourceName, Namespace: o.resourceNamespace}) + } + return resource, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/status.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/status.go new file mode 100644 index 000000000..15ebfd5d5 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/orphaning/status.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package orphaning + +import ( + "io" + + "github.com/spf13/cobra" + + "k8s.io/klog" + + ctlutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +const ( + Enabled = "Enabled" + Disabled = "Disabled" +) + +var ( + orphaning_status_long = ` + Checks the status of "orphaning enable" ('kubefed.io/orphan: true') annotation on a federated resource. + Returns "Enabled" or "Disabled" + + Current context is assumed to be a Kubernetes cluster hosting the kubefed control plane. 
+ Please use the --host-cluster-context flag otherwise.` + + orphaning_status_example = ` + # Checks the status of the orphaning mode of a federated resource of type FederatedDeployment and named foo + kubefedctl orphaning status FederatedDeployment foo --host-cluster-context=cluster1` +) + +// newCmdStatusOrphaning checks status of orphaning deletion of the federated resource +func newCmdStatusOrphaning(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &orphanResource{} + cmd := &cobra.Command{ + Use: "status ", + Short: "Get the orphaning deletion status of the federated resource", + Long: orphaning_status_long, + Example: orphaning_status_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.RunStatus(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + err := opts.Bind(flags) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + return cmd +} + +// RunStatus implements the `status` command. +func (o *orphanResource) RunStatus(cmdOut io.Writer, config util.FedConfig) error { + resourceClient, err := o.GetResourceClient(config, cmdOut) + if err != nil { + return err + } + fedResource, err := o.GetFederatedResource(resourceClient) + if err != nil { + return err + } + if ctlutil.IsOrphaningEnabled(fedResource) { + _, err = cmdOut.Write([]byte(Enabled + "\n")) + return err + } + _, err = cmdOut.Write([]byte(Disabled + "\n")) + return err +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/unjoin.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/unjoin.go new file mode 100644 index 000000000..7aa7a2535 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/unjoin.go @@ -0,0 +1,442 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubefedctl + +import ( + "context" + goerrors "errors" + "io" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog" + + fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1" + genericclient "sigs.k8s.io/kubefed/pkg/client/generic" + controllerutil "sigs.k8s.io/kubefed/pkg/controller/util" + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" + "sigs.k8s.io/kubefed/pkg/kubefedctl/util" +) + +var ( + unjoin_long = ` + Unjoin removes the registration of a Kubernetes cluster + from a KubeFed control plane. Current context is assumed + to be a Kubernetes cluster hosting a KubeFed control + plane. Please use the --host-cluster-context flag + otherwise.` + unjoin_example = ` + # Remove the registration of a Kubernetes cluster + # from a KubeFed control plane by specifying the + # cluster name and the context name of the control + # plane's host cluster. Cluster name must be a + # valid RFC 1123 subdomain name. Cluster context + # must be specified if the cluster name is different + # than the cluster's context in the local kubeconfig. 
+ kubefedctl unjoin foo --host-cluster-context=bar` +) + +type unjoinFederation struct { + options.GlobalSubcommandOptions + options.CommonJoinOptions + unjoinFederationOptions +} + +type unjoinFederationOptions struct { + forceDeletion bool +} + +// Bind adds the unjoin specific arguments to the flagset passed in as an +// argument. +func (o *unjoinFederationOptions) Bind(flags *pflag.FlagSet) { + flags.BoolVar(&o.forceDeletion, "force", false, + "Delete federated cluster and secret resources even if resources in the cluster targeted for unjoin are not removed successfully.") +} + +// NewCmdUnjoin defines the `unjoin` command that removes the +// registration of a cluster from a KubeFed control plane. +func NewCmdUnjoin(cmdOut io.Writer, config util.FedConfig) *cobra.Command { + opts := &unjoinFederation{} + + cmd := &cobra.Command{ + Use: "unjoin CLUSTER_NAME --host-cluster-context=HOST_CONTEXT", + Short: "Remove the registration of a cluster from a KubeFed control plane", + Long: unjoin_long, + Example: unjoin_example, + Run: func(cmd *cobra.Command, args []string) { + err := opts.Complete(args) + if err != nil { + klog.Fatalf("Error: %v", err) + } + + err = opts.Run(cmdOut, config) + if err != nil { + klog.Fatalf("Error: %v", err) + } + }, + } + + flags := cmd.Flags() + opts.GlobalSubcommandBind(flags) + opts.CommonSubcommandBind(flags) + opts.Bind(flags) + + return cmd +} + +// Complete ensures that options are valid and marshals them if necessary. 
+func (j *unjoinFederation) Complete(args []string) error { + err := j.SetName(args) + if err != nil { + return err + } + + if j.ClusterContext == "" { + klog.V(2).Infof("Defaulting cluster context to unjoining cluster name %s", j.ClusterName) + j.ClusterContext = j.ClusterName + } + + if j.HostClusterName != "" && strings.ContainsAny(j.HostClusterName, ":/") { + return goerrors.New("host-cluster-name may not contain \"/\" or \":\"") + } + + if j.HostClusterName == "" && strings.ContainsAny(j.HostClusterContext, ":/") { + return goerrors.New("host-cluster-name must be set if the name of the host cluster context contains one of \":\" or \"/\"") + } + + klog.V(2).Infof("Args and flags: name %s, host-cluster-context: %s, host-system-namespace: %s, kubeconfig: %s, cluster-context: %s, dry-run: %v", + j.ClusterName, j.HostClusterContext, j.KubeFedNamespace, j.Kubeconfig, j.ClusterContext, j.DryRun) + + return nil +} + +// Run is the implementation of the `unjoin` command. +func (j *unjoinFederation) Run(cmdOut io.Writer, config util.FedConfig) error { + hostClientConfig := config.GetClientConfig(j.HostClusterContext, j.Kubeconfig) + if err := j.SetHostClusterContextFromConfig(hostClientConfig); err != nil { + return err + } + + hostConfig, err := hostClientConfig.ClientConfig() + if err != nil { + // TODO(font): Return new error with this same text so it can be output + // by caller. + klog.V(2).Infof("Failed to get host cluster config: %v", err) + return err + } + + clusterConfig, err := config.ClusterConfig(j.ClusterContext, j.Kubeconfig) + if err != nil { + klog.V(2).Infof("Failed to get unjoining cluster config: %v", err) + + if !j.forceDeletion { + return err + } + // If configuration for the member cluster cannot be successfully loaded, + // forceDeletion indicates that resources associated with the member cluster + // should still be removed from the host cluster. 
+ } + + hostClusterName := j.HostClusterContext + if j.HostClusterName != "" { + hostClusterName = j.HostClusterName + } + + return UnjoinCluster(hostConfig, clusterConfig, j.KubeFedNamespace, + hostClusterName, j.ClusterContext, j.ClusterName, j.forceDeletion, j.DryRun) +} + +// UnjoinCluster performs all the necessary steps to remove the +// registration of a cluster from a KubeFed control plane provided the +// required set of parameters are passed in. +func UnjoinCluster(hostConfig, clusterConfig *rest.Config, kubefedNamespace, hostClusterName, + unjoiningClusterContext, unjoiningClusterName string, forceDeletion, dryRun bool) error { + + hostClientset, err := util.HostClientset(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get host cluster clientset: %v", err) + return err + } + + var clusterClientset *kubeclient.Clientset + if clusterConfig != nil { + clusterClientset, err = util.ClusterClientset(clusterConfig) + if err != nil { + klog.V(2).Infof("Failed to get unjoining cluster clientset: %v", err) + if !forceDeletion { + return err + } + } + } + + client, err := genericclient.New(hostConfig) + if err != nil { + klog.V(2).Infof("Failed to get kubefed clientset: %v", err) + return err + } + + if clusterClientset != nil { + err := deleteRBACResources(clusterClientset, kubefedNamespace, unjoiningClusterName, hostClusterName, forceDeletion, dryRun) + if err != nil { + if !forceDeletion { + return err + } + klog.V(2).Infof("Failed to delete RBAC resources: %v", err) + } + + err = deleteFedNSFromUnjoinCluster(hostClientset, clusterClientset, kubefedNamespace, unjoiningClusterName, dryRun) + if err != nil { + if !forceDeletion { + return err + } + klog.V(2).Infof("Failed to delete kubefed namespace: %v", err) + } + } + + // deletionSucceeded when all operations in deleteRBACResources and deleteFedNSFromUnjoinCluster succeed. 
+ return deleteFederatedClusterAndSecret(hostClientset, client, kubefedNamespace, unjoiningClusterName, forceDeletion, dryRun) +} + +// deleteFederatedClusterAndSecret deletes a federated cluster resource that associates +// the cluster and secret. +func deleteFederatedClusterAndSecret(hostClientset kubeclient.Interface, client genericclient.Client, + kubefedNamespace, unjoiningClusterName string, forceDeletion, dryRun bool) error { + if dryRun { + return nil + } + + klog.V(2).Infof("Deleting kubefed cluster resource from namespace %q for unjoin cluster %q", + kubefedNamespace, unjoiningClusterName) + + fedCluster := &fedv1b1.KubeFedCluster{} + err := client.Get(context.TODO(), fedCluster, kubefedNamespace, unjoiningClusterName) + if err != nil { + return errors.Wrapf(err, "Failed to get kubefed cluster \"%s/%s\"", kubefedNamespace, unjoiningClusterName) + } + + err = hostClientset.CoreV1().Secrets(kubefedNamespace).Delete(fedCluster.Spec.SecretRef.Name, + &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Secret \"%s/%s\" does not exist in the host cluster.", kubefedNamespace, fedCluster.Spec.SecretRef.Name) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Failed to delete secret \"%s/%s\" for unjoin cluster %q", + kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted secret \"%s/%s\" for unjoin cluster %q", kubefedNamespace, fedCluster.Spec.SecretRef.Name, unjoiningClusterName) + } + + err = client.Delete(context.TODO(), fedCluster, fedCluster.Namespace, fedCluster.Name) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("KubeFed cluster \"%s/%s\" does not exist in the host cluster.", fedCluster.Namespace, fedCluster.Name) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Failed to delete kubefed cluster \"%s/%s\" for unjoin cluster %q", fedCluster.Namespace, fedCluster.Name,
unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted kubefed cluster \"%s/%s\" for unjoin cluster %q.", fedCluster.Namespace, fedCluster.Name, unjoiningClusterName) + } + + return nil +} + +// deleteRBACResources deletes the cluster role, cluster rolebindings and service account +// from the unjoining cluster. +func deleteRBACResources(unjoiningClusterClientset kubeclient.Interface, + namespace, unjoiningClusterName, hostClusterName string, forceDeletion, dryRun bool) error { + + saName := util.ClusterServiceAccountName(unjoiningClusterName, hostClusterName) + + err := deleteClusterRoleAndBinding(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, forceDeletion, dryRun) + if err != nil { + return err + } + + err = deleteServiceAccount(unjoiningClusterClientset, saName, namespace, unjoiningClusterName, dryRun) + if err != nil { + return err + } + + return nil +} + +// deleteFedNSFromUnjoinCluster deletes the kubefed namespace from +// the unjoining cluster so long as the unjoining cluster is not the +// host cluster. 
+func deleteFedNSFromUnjoinCluster(hostClientset, unjoiningClusterClientset kubeclient.Interface, + kubefedNamespace, unjoiningClusterName string, dryRun bool) error { + + if dryRun { + return nil + } + + hostClusterNamespace, err := hostClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "Error retrieving namespace %q from host cluster", kubefedNamespace) + } + + unjoiningClusterNamespace, err := unjoiningClusterClientset.CoreV1().Namespaces().Get(kubefedNamespace, metav1.GetOptions{}) + if err != nil { + return errors.Wrapf(err, "Error retrieving namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName) + } + + if controllerutil.IsPrimaryCluster(hostClusterNamespace, unjoiningClusterNamespace) { + klog.V(2).Infof("The kubefed namespace %q does not need to be deleted from the host cluster by unjoin.", kubefedNamespace) + return nil + } + + klog.V(2).Infof("Deleting kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + err = unjoiningClusterClientset.CoreV1().Namespaces().Delete(kubefedNamespace, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("The kubefed namespace %q no longer exists in unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + return nil + } else if err != nil { + return errors.Wrapf(err, "Could not delete kubefed namespace %q from unjoining cluster %q", kubefedNamespace, unjoiningClusterName) + } else { + klog.V(2).Infof("Deleted kubefed namespace %q from unjoining cluster %q.", kubefedNamespace, unjoiningClusterName) + } + + return nil +} + +// deleteServiceAccount deletes a service account in the cluster associated +// with clusterClientset with credentials that are used by the host cluster +// to access its API server. 
+func deleteServiceAccount(clusterClientset kubeclient.Interface, saName, + namespace, unjoiningClusterName string, dryRun bool) error { + if dryRun { + return nil + } + + klog.V(2).Infof("Deleting service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName) + + // Delete a service account. + err := clusterClientset.CoreV1().ServiceAccounts(namespace).Delete(saName, + &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Service account \"%s/%s\" does not exist.", namespace, saName) + } else if err != nil { + return errors.Wrapf(err, "Could not delete service account \"%s/%s\"", namespace, saName) + } else { + klog.V(2).Infof("Deleted service account \"%s/%s\" in unjoining cluster %q.", namespace, saName, unjoiningClusterName) + } + + return nil +} + +// deleteClusterRoleAndBinding deletes an RBAC cluster role and binding that +// allows the service account identified by saName to access all resources in +// all namespaces in the cluster associated with clusterClientset. 
+func deleteClusterRoleAndBinding(clusterClientset kubeclient.Interface, + saName, namespace, unjoiningClusterName string, forceDeletion, dryRun bool) error { + if dryRun { + return nil + } + + roleName := util.RoleName(saName) + healthCheckRoleName := util.HealthCheckRoleName(saName, namespace) + + // Attempt to delete all role and role bindings created by join + for _, name := range []string{roleName, healthCheckRoleName} { + klog.V(2).Infof("Deleting cluster role binding %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + + err := clusterClientset.RbacV1().ClusterRoleBindings().Delete(name, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Cluster role binding %q for service account %q does not exist in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete cluster role binding %q for service account %q in unjoining cluster %q", + name, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted cluster role binding %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } + + klog.V(2).Infof("Deleting cluster role %q for service account %q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + err = clusterClientset.RbacV1().ClusterRoles().Delete(name, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Cluster role %q for service account %q does not exist in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete cluster role %q for service account %q in unjoining cluster %q", + name, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted cluster role %q for service account 
%q in unjoining cluster %q.", + name, saName, unjoiningClusterName) + } + } + + klog.V(2).Infof("Deleting role binding \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + err := clusterClientset.RbacV1().RoleBindings(namespace).Delete(roleName, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Role binding \"%s/%s\" for service account %q does not exist in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete role binding \"%s/%s\" for service account %q in unjoining cluster %q", + namespace, roleName, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleted role binding \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } + + klog.V(2).Infof("Deleting role \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + err = clusterClientset.RbacV1().Roles(namespace).Delete(roleName, &metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Role \"%s/%s\" for service account %q does not exist in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } else if err != nil { + wrappedErr := errors.Wrapf(err, "Could not delete role \"%s/%s\" for service account %q in unjoining cluster %q", + namespace, roleName, saName, unjoiningClusterName) + if !forceDeletion { + return wrappedErr + } + klog.V(2).Infof("%v", wrappedErr) + } else { + klog.V(2).Infof("Deleting Role \"%s/%s\" for service account %q in unjoining cluster %q.", + namespace, roleName, saName, unjoiningClusterName) + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/util.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/util.go new file mode 
100644 index 000000000..9655b7f05 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/util.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "sigs.k8s.io/kubefed/pkg/kubefedctl/options" +) + +const FederatedKindPrefix = "Federated" + +// FedConfig provides a rest config based on the filesystem kubeconfig (via +// pathOptions) and context in order to talk to the host kubernetes cluster +// and the joining kubernetes cluster. +type FedConfig interface { + HostConfig(context, kubeconfigPath string) (*rest.Config, error) + ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) + GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig +} + +// fedConfig implements the FedConfig interface. +type fedConfig struct { + pathOptions *clientcmd.PathOptions +} + +// NewFedConfig creates a fedConfig for `kubefedctl` commands. +func NewFedConfig(pathOptions *clientcmd.PathOptions) FedConfig { + return &fedConfig{ + pathOptions: pathOptions, + } +} + +// HostConfig provides a rest config to talk to the host kubernetes cluster +// based on the context and kubeconfig passed in.
+func (a *fedConfig) HostConfig(context, kubeconfigPath string) (*rest.Config, error) { + hostConfig := a.GetClientConfig(context, kubeconfigPath) + hostClientConfig, err := hostConfig.ClientConfig() + if err != nil { + return nil, err + } + + return hostClientConfig, nil +} + +// ClusterConfig provides a rest config to talk to the joining kubernetes +// cluster based on the context and kubeconfig passed in. +func (a *fedConfig) ClusterConfig(context, kubeconfigPath string) (*rest.Config, error) { + clusterConfig := a.GetClientConfig(context, kubeconfigPath) + clusterClientConfig, err := clusterConfig.ClientConfig() + if err != nil { + return nil, err + } + + return clusterClientConfig, nil +} + +// getClientConfig is a helper method to create a client config from the +// context and kubeconfig passed as arguments. +func (a *fedConfig) GetClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig { + loadingRules := *a.pathOptions.LoadingRules + loadingRules.Precedence = a.pathOptions.GetLoadingPrecedence() + loadingRules.ExplicitPath = kubeconfigPath + overrides := &clientcmd.ConfigOverrides{ + CurrentContext: context, + } + + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, overrides) +} + +// HostClientset provides a kubernetes API compliant clientset to +// communicate with the host cluster's kubernetes API server. +func HostClientset(config *rest.Config) (*kubeclient.Clientset, error) { + return kubeclient.NewForConfig(config) +} + +// ClusterClientset provides a kubernetes API compliant clientset to +// communicate with the joining cluster's kubernetes API server. +func ClusterClientset(config *rest.Config) (*kubeclient.Clientset, error) { + return kubeclient.NewForConfig(config) +} + +// ClusterServiceAccountName returns the name of a service account whose +// credentials are used by the host cluster to access the client cluster. 
+func ClusterServiceAccountName(joiningClusterName, hostClusterName string) string { + return fmt.Sprintf("%s-%s", joiningClusterName, hostClusterName) +} + +// RoleName returns the name of a Role or ClusterRole and its +// associated RoleBinding or ClusterRoleBinding that are used to allow +// the service account to access necessary resources on the cluster. +func RoleName(serviceAccountName string) string { + return fmt.Sprintf("kubefed-controller-manager:%s", serviceAccountName) +} + +// HealthCheckRoleName returns the name of a ClusterRole and its +// associated ClusterRoleBinding that is used to allow the service +// account to check the health of the cluster and list nodes. +func HealthCheckRoleName(serviceAccountName, namespace string) string { + return fmt.Sprintf("kubefed-controller-manager:%s:healthcheck-%s", namespace, serviceAccountName) +} + +// IsFederatedAPIResource checks if a resource with the given Kind and group is a Federated one +func IsFederatedAPIResource(kind, group string) bool { + return strings.HasPrefix(kind, FederatedKindPrefix) && group == options.DefaultFederatedGroup +} + +// GetNamespace returns namespace of the current context +func GetNamespace(hostClusterContext string, kubeconfig string, config FedConfig) (string, error) { + clientConfig := config.GetClientConfig(hostClusterContext, kubeconfig) + currentContext, err := options.CurrentContext(clientConfig) + if err != nil { + return "", err + } + + ns, _, err := clientConfig.Namespace() + if err != nil { + return "", errors.Wrapf(err, "Failed to get ClientConfig for host cluster context %q and kubeconfig %q", + currentContext, kubeconfig) + } + + if len(ns) == 0 { + ns = "default" + } + return ns, nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/yaml_writer.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/yaml_writer.go new file mode 100644 index 000000000..3dff5fb01 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/util/yaml_writer.go @@ -0,0 
+1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "io" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func WriteUnstructuredToYaml(unstructuredObj *unstructured.Unstructured, w io.Writer) error { + // If status is included in the yaml, attempting to create it in a + // kube API will cause an error. + obj := unstructuredObj.DeepCopy() + unstructured.RemoveNestedField(obj.Object, "status") + unstructured.RemoveNestedField(obj.Object, "metadata", "creationTimestamp") + + errMsg := "Error encoding unstructured object to yaml" + objJSON, err := obj.MarshalJSON() + if err != nil { + return errors.Wrap(err, errMsg) + } + + data, err := yaml.JSONToYAML(objJSON) + if err != nil { + return errors.Wrap(err, errMsg) + } + _, err = w.Write(data) + if err != nil { + return errors.Wrap(err, errMsg) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/version.go b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/version.go new file mode 100644 index 000000000..56b414478 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/kubefedctl/version.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubefedctl + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + + "sigs.k8s.io/kubefed/pkg/version" +) + +var ( + version_long = ` + Version prints the version info of this command.` + version_example = ` + # Print kubefed command version + kubefed version` +) + +// NewCmdVersion prints out the release version info for this command binary. +func NewCmdVersion(out io.Writer) *cobra.Command { + cmd := &cobra.Command{ + Use: "version", + Short: "Print the version info", + Long: version_long, + Example: version_example, + Run: func(cmd *cobra.Command, args []string) { + fmt.Fprintf(out, "kubefedctl version: %s\n", fmt.Sprintf("%#v", version.Get())) + }, + } + + return cmd +} diff --git a/vendor/sigs.k8s.io/kubefed/pkg/version/base.go b/vendor/sigs.k8s.io/kubefed/pkg/version/base.go new file mode 100644 index 000000000..c7a18cb23 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/version/base.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. 
+// +// This is the fallback data used when version information from git is not +// provided via go ldflags (via Makefile). +var ( + version = "v0.0.1-alpha.0" // output of "git describe" + // the prerequisite is that the branch should be + // tagged using the correct versioning strategy. + + gitCommit = "unknown" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "unknown" // state of git tree, either "clean" or "dirty" + + buildDate = "unknown" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/sigs.k8s.io/kubefed/pkg/version/version.go b/vendor/sigs.k8s.io/kubefed/pkg/version/version.go new file mode 100644 index 000000000..24a9a2ac8 --- /dev/null +++ b/vendor/sigs.k8s.io/kubefed/pkg/version/version.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" +) + +type Info struct { + Version string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. 
+func Get() Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return Info{ + Version: version, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +}