update vendor

Signed-off-by: Roland.Ma <rolandma@yunify.com>
This commit is contained in:
Roland.Ma
2021-08-11 07:10:14 +00:00
parent a18f72b565
commit ea8f47c73a
2901 changed files with 269317 additions and 43103 deletions

View File

@@ -1,26 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
"sigs.k8s.io/kubefed/pkg/apis/multiclusterdns/v1alpha1"
)
func init() {
// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
}

View File

@@ -51,6 +51,10 @@ type KubeFedClusterSpec struct {
// If * is specified, it is expected to be the only option in list.
// +optional
DisabledTLSValidations []TLSValidation `json:"disabledTLSValidations,omitempty"`
// ProxyURL allows setting a proxy URL for the cluster.
// +optional
ProxyURL string `json:"proxyURL"`
}
// LocalSecretReference is a reference to a secret within the enclosing

View File

@@ -37,6 +37,8 @@ type KubeFedConfigSpec struct {
ClusterHealthCheck *ClusterHealthCheckConfig `json:"clusterHealthCheck,omitempty"`
// +optional
SyncController *SyncControllerConfig `json:"syncController,omitempty"`
// +optional
StatusController *StatusControllerConfig `json:"statusController,omitempty"`
}
type DurationConfig struct {
@@ -105,6 +107,10 @@ type ClusterHealthCheckConfig struct {
}
type SyncControllerConfig struct {
// The maximum number of concurrent Reconciles of sync controller which can be run.
// Defaults to 1.
// +optional
MaxConcurrentReconciles *int64 `json:"maxConcurrentReconciles,omitempty"`
// Whether to adopt pre-existing resources in member clusters. Defaults to
// "Enabled".
// +optional
@@ -118,6 +124,13 @@ const (
AdoptResourcesDisabled ResourceAdoption = "Disabled"
)
type StatusControllerConfig struct {
// The maximum number of concurrent Reconciles of status controller which can be run.
// Defaults to 1.
// +optional
MaxConcurrentReconciles *int64 `json:"maxConcurrentReconciles,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=kubefedconfigs

View File

@@ -454,6 +454,11 @@ func (in *KubeFedConfigSpec) DeepCopyInto(out *KubeFedConfigSpec) {
*out = new(SyncControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.StatusController != nil {
in, out := &in.StatusController, &out.StatusController
*out = new(StatusControllerConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeFedConfigSpec.
@@ -516,9 +521,34 @@ func (in *LocalSecretReference) DeepCopy() *LocalSecretReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatusControllerConfig) DeepCopyInto(out *StatusControllerConfig) {
*out = *in
if in.MaxConcurrentReconciles != nil {
in, out := &in.MaxConcurrentReconciles, &out.MaxConcurrentReconciles
*out = new(int64)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusControllerConfig.
func (in *StatusControllerConfig) DeepCopy() *StatusControllerConfig {
if in == nil {
return nil
}
out := new(StatusControllerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncControllerConfig) DeepCopyInto(out *SyncControllerConfig) {
*out = *in
if in.MaxConcurrentReconciles != nil {
in, out := &in.MaxConcurrentReconciles, &out.MaxConcurrentReconciles
*out = new(int64)
**out = **in
}
if in.AdoptResources != nil {
in, out := &in.AdoptResources, &out.AdoptResources
*out = new(ResourceAdoption)

View File

@@ -1,85 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Targets is a representation of a list of targets for an endpoint.
type Targets []string
// TTL is a structure defining the TTL (time to live) of a DNS record, in seconds.
type TTL int64
// Labels store metadata related to the endpoint;
// it is then stored in a persistent storage via serialization.
type Labels map[string]string
// Endpoint is a high-level association between a service and an IP.
type Endpoint struct {
	// The FQDN of the DNS record.
	DNSName string `json:"dnsName,omitempty"`
	// The targets that the DNS record points to.
	Targets Targets `json:"targets,omitempty"`
	// RecordType type of record, e.g. CNAME, A, SRV, TXT etc.
	RecordType string `json:"recordType,omitempty"`
	// TTL for the record in seconds.
	RecordTTL TTL `json:"recordTTL,omitempty"`
	// Labels stores labels defined for the Endpoint.
	// +optional
	Labels Labels `json:"labels,omitempty"`
}
// DNSEndpointSpec defines the desired state of DNSEndpoint.
type DNSEndpointSpec struct {
	// Endpoints lists the DNS records to be programmed.
	// Note: entries are pointers and may be nil.
	Endpoints []*Endpoint `json:"endpoints,omitempty"`
}
// DNSEndpointStatus defines the observed state of DNSEndpoint.
type DNSEndpointStatus struct {
	// ObservedGeneration is the generation as observed by the controller consuming the DNSEndpoint.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=dnsendpoints
// +kubebuilder:subresource:status
// DNSEndpoint is the CRD wrapper for Endpoint which is designed to act as a
// source of truth for external-dns.
type DNSEndpoint struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec DNSEndpointSpec `json:"spec,omitempty"`
	Status DNSEndpointStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// DNSEndpointList contains a list of DNSEndpoint.
type DNSEndpointList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []DNSEndpoint `json:"items"`
}
func init() {
SchemeBuilder.Register(&DNSEndpoint{}, &DNSEndpointList{})
}

View File

@@ -1,47 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=domains
// Domain associates a DNS zone (and its authoritative name server) with the
// KubeFed control plane.
type Domain struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Domain is the DNS zone associated with the KubeFed control plane
	Domain string `json:"domain"`
	// NameServer is the authoritative DNS name server for the KubeFed domain
	NameServer string `json:"nameServer,omitempty"`
}
// +kubebuilder:object:root=true
// DomainList contains a list of Domain.
type DomainList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []Domain `json:"items"`
}
func init() {
SchemeBuilder.Register(&Domain{}, &DomainList{})
}

View File

@@ -1,43 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// NOTE: Boilerplate only. Ignore this file.
// Package v1alpha1 contains API Schema definitions for the multiclusterdns v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=multiclusterdns.kubefed.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// SchemeGroupVersion is the group version used to register these objects
	// (multiclusterdns.kubefed.io/v1alpha1).
	SchemeGroupVersion = schema.GroupVersion{Group: "multiclusterdns.kubefed.io", Version: "v1alpha1"}
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
	// AddToScheme is required by pkg/client/...
	AddToScheme = SchemeBuilder.AddToScheme
)
// Resource is required by pkg/client/listers/...
// It qualifies the given resource name with this package's API group.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}

View File

@@ -1,69 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// IngressDNSRecordSpec defines the desired state of IngressDNSRecord.
type IngressDNSRecordSpec struct {
	// Host from the IngressRule in Cluster Ingress Spec
	Hosts []string `json:"hosts,omitempty"`
	// RecordTTL is the TTL in seconds for DNS records created for the Ingress, if omitted a default would be used
	RecordTTL TTL `json:"recordTTL,omitempty"`
}
// IngressDNSRecordStatus defines the observed state of IngressDNSRecord.
type IngressDNSRecordStatus struct {
	// Array of Ingress Controller LoadBalancers
	DNS []ClusterIngressDNS `json:"dns,omitempty"`
}
// ClusterIngressDNS defines the observed status of Ingress within a cluster.
type ClusterIngressDNS struct {
	// Cluster name
	Cluster string `json:"cluster,omitempty"`
	// LoadBalancer for the corresponding ingress controller
	LoadBalancer corev1.LoadBalancerStatus `json:"loadBalancer,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=ingressdnsrecords
// +kubebuilder:subresource:status
// IngressDNSRecord carries the hosts of an Ingress and the observed
// per-cluster load balancer status used to program DNS for those hosts.
type IngressDNSRecord struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec IngressDNSRecordSpec `json:"spec,omitempty"`
	Status IngressDNSRecordStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// IngressDNSRecordList contains a list of IngressDNSRecord.
type IngressDNSRecordList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []IngressDNSRecord `json:"items"`
}
func init() {
SchemeBuilder.Register(&IngressDNSRecord{}, &IngressDNSRecordList{})
}

View File

@@ -1,107 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ServiceDNSRecordSpec defines the desired state of ServiceDNSRecord.
type ServiceDNSRecordSpec struct {
	// DomainRef is the name of the domain object to which the corresponding federated service belongs
	DomainRef string `json:"domainRef"`
	// RecordTTL is the TTL in seconds for DNS records created for this Service, if omitted a default would be used
	RecordTTL TTL `json:"recordTTL,omitempty"`
	// DNSPrefix when specified, an additional DNS record would be created with <DNSPrefix>.<KubeFedDomain>
	DNSPrefix string `json:"dnsPrefix,omitempty"`
	// ExternalName when specified, replaces the service name portion of a resource record
	// with the value of ExternalName.
	ExternalName string `json:"externalName,omitempty"`
	// AllowServiceWithoutEndpoints allows DNS records to be written for Service shards without endpoints
	AllowServiceWithoutEndpoints bool `json:"allowServiceWithoutEndpoints,omitempty"`
}
// ServiceDNSRecordStatus defines the observed state of ServiceDNSRecord.
type ServiceDNSRecordStatus struct {
	// Domain is the DNS domain of the KubeFed control plane as in Domain API
	Domain string `json:"domain,omitempty"`
	// DNS holds the per-cluster load balancer, zone and region observations.
	DNS []ClusterDNS `json:"dns,omitempty"`
}
// ClusterDNS defines the observed status of LoadBalancer within a cluster.
type ClusterDNS struct {
	// Cluster name
	Cluster string `json:"cluster,omitempty"`
	// LoadBalancer for the corresponding service
	LoadBalancer corev1.LoadBalancerStatus `json:"loadBalancer,omitempty"`
	// Zones to which the cluster belongs
	Zones []string `json:"zones,omitempty"`
	// Region to which the cluster belongs
	Region string `json:"region,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceDNSRecord defines a scheme of DNS name and subdomains that
// should be programmed with endpoint information about a Service deployed in
// multiple Kubernetes clusters. ServiceDNSRecord is name-associated
// with the Services it programs endpoint information for, meaning that a
// ServiceDNSRecord expresses the intent to program DNS with
// information about endpoints for the Kubernetes Service resources with the
// same name and namespace in different clusters.
//
// For example, given the following values:
//
// metadata.name: test-service
// metadata.namespace: test-namespace
// spec.federationName: test-federation
//
// the following set of DNS names will be programmed:
//
// Global Level: test-service.test-namespace.test-federation.svc.<federation-domain>
// Region Level: test-service.test-namespace.test-federation.svc.(status.DNS[*].region).<federation-domain>
// Zone Level : test-service.test-namespace.test-federation.svc.(status.DNS[*].zone).(status.DNS[*].region).<federation-domain>
//
// Optionally, when DNSPrefix is specified, another DNS name will be programmed
// which would be a CNAME record pointing to DNS name at global level as below:
// <dns-prefix>.<federation-domain> --> test-service.test-namespace.test-federation.svc.<federation-domain>
//
// +k8s:openapi-gen=true
// +kubebuilder:resource:path=servicednsrecords
// +kubebuilder:subresource:status
type ServiceDNSRecord struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec ServiceDNSRecordSpec `json:"spec,omitempty"`
	Status ServiceDNSRecordStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceDNSRecordList contains a list of ServiceDNSRecord.
type ServiceDNSRecordList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []ServiceDNSRecord `json:"items"`
}
func init() {
SchemeBuilder.Register(&ServiceDNSRecord{}, &ServiceDNSRecordList{})
}

View File

@@ -1,483 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the functions below are produced by controller-gen (see the
// "DO NOT EDIT" header of this file); regenerate rather than hand-editing.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDNS) DeepCopyInto(out *ClusterDNS) {
	*out = *in
	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
	if in.Zones != nil {
		// Shadow in/out with the field addresses; standard generated idiom.
		in, out := &in.Zones, &out.Zones
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDNS.
func (in *ClusterDNS) DeepCopy() *ClusterDNS {
	if in == nil {
		return nil
	}
	out := new(ClusterDNS)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterIngressDNS) DeepCopyInto(out *ClusterIngressDNS) {
	*out = *in
	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIngressDNS.
func (in *ClusterIngressDNS) DeepCopy() *ClusterIngressDNS {
	if in == nil {
		return nil
	}
	out := new(ClusterIngressDNS)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNSEndpoint) DeepCopyInto(out *DNSEndpoint) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpoint.
func (in *DNSEndpoint) DeepCopy() *DNSEndpoint {
	if in == nil {
		return nil
	}
	out := new(DNSEndpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DNSEndpoint) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNSEndpointList) DeepCopyInto(out *DNSEndpointList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DNSEndpoint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointList.
func (in *DNSEndpointList) DeepCopy() *DNSEndpointList {
	if in == nil {
		return nil
	}
	out := new(DNSEndpointList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DNSEndpointList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNSEndpointSpec) DeepCopyInto(out *DNSEndpointSpec) {
	*out = *in
	if in.Endpoints != nil {
		in, out := &in.Endpoints, &out.Endpoints
		*out = make([]*Endpoint, len(*in))
		for i := range *in {
			// Entries are pointers and may be nil; only copy non-nil ones.
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(Endpoint)
				(*in).DeepCopyInto(*out)
			}
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointSpec.
func (in *DNSEndpointSpec) DeepCopy() *DNSEndpointSpec {
	if in == nil {
		return nil
	}
	out := new(DNSEndpointSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNSEndpointStatus) DeepCopyInto(out *DNSEndpointStatus) {
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSEndpointStatus.
func (in *DNSEndpointStatus) DeepCopy() *DNSEndpointStatus {
	if in == nil {
		return nil
	}
	out := new(DNSEndpointStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Domain) DeepCopyInto(out *Domain) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain.
func (in *Domain) DeepCopy() *Domain {
	if in == nil {
		return nil
	}
	out := new(Domain)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Domain) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainList) DeepCopyInto(out *DomainList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Domain, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList.
func (in *DomainList) DeepCopy() *DomainList {
	if in == nil {
		return nil
	}
	out := new(DomainList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DomainList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Endpoint) DeepCopyInto(out *Endpoint) {
	*out = *in
	if in.Targets != nil {
		in, out := &in.Targets, &out.Targets
		*out = make(Targets, len(*in))
		copy(*out, *in)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(Labels, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
func (in *Endpoint) DeepCopy() *Endpoint {
	if in == nil {
		return nil
	}
	out := new(Endpoint)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressDNSRecord) DeepCopyInto(out *IngressDNSRecord) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecord.
func (in *IngressDNSRecord) DeepCopy() *IngressDNSRecord {
	if in == nil {
		return nil
	}
	out := new(IngressDNSRecord)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressDNSRecord) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressDNSRecordList) DeepCopyInto(out *IngressDNSRecordList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]IngressDNSRecord, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordList.
func (in *IngressDNSRecordList) DeepCopy() *IngressDNSRecordList {
	if in == nil {
		return nil
	}
	out := new(IngressDNSRecordList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressDNSRecordList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressDNSRecordSpec) DeepCopyInto(out *IngressDNSRecordSpec) {
	*out = *in
	if in.Hosts != nil {
		in, out := &in.Hosts, &out.Hosts
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordSpec.
func (in *IngressDNSRecordSpec) DeepCopy() *IngressDNSRecordSpec {
	if in == nil {
		return nil
	}
	out := new(IngressDNSRecordSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressDNSRecordStatus) DeepCopyInto(out *IngressDNSRecordStatus) {
	*out = *in
	if in.DNS != nil {
		in, out := &in.DNS, &out.DNS
		*out = make([]ClusterIngressDNS, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDNSRecordStatus.
func (in *IngressDNSRecordStatus) DeepCopy() *IngressDNSRecordStatus {
	if in == nil {
		return nil
	}
	out := new(IngressDNSRecordStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Note the value receiver: Labels is a map type, so in is copied by header.
func (in Labels) DeepCopyInto(out *Labels) {
	{
		in := &in
		*out = make(Labels, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Labels.
func (in Labels) DeepCopy() Labels {
	if in == nil {
		return nil
	}
	out := new(Labels)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDNSRecord) DeepCopyInto(out *ServiceDNSRecord) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecord.
func (in *ServiceDNSRecord) DeepCopy() *ServiceDNSRecord {
	if in == nil {
		return nil
	}
	out := new(ServiceDNSRecord)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceDNSRecord) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDNSRecordList) DeepCopyInto(out *ServiceDNSRecordList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ServiceDNSRecord, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordList.
func (in *ServiceDNSRecordList) DeepCopy() *ServiceDNSRecordList {
	if in == nil {
		return nil
	}
	out := new(ServiceDNSRecordList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceDNSRecordList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDNSRecordSpec) DeepCopyInto(out *ServiceDNSRecordSpec) {
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordSpec.
func (in *ServiceDNSRecordSpec) DeepCopy() *ServiceDNSRecordSpec {
	if in == nil {
		return nil
	}
	out := new(ServiceDNSRecordSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDNSRecordStatus) DeepCopyInto(out *ServiceDNSRecordStatus) {
	*out = *in
	if in.DNS != nil {
		in, out := &in.DNS, &out.DNS
		*out = make([]ClusterDNS, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDNSRecordStatus.
func (in *ServiceDNSRecordStatus) DeepCopy() *ServiceDNSRecordStatus {
	if in == nil {
		return nil
	}
	out := new(ServiceDNSRecordStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Note the value receiver: Targets is a slice type, so in is copied by header.
func (in Targets) DeepCopyInto(out *Targets) {
	{
		in := &in
		*out = make(Targets, len(*in))
		copy(*out, *in)
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Targets.
func (in Targets) DeepCopy() Targets {
	if in == nil {
		return nil
	}
	out := new(Targets)
	in.DeepCopyInto(out)
	return *out
}

View File

@@ -45,6 +45,14 @@ type ReplicaSchedulingPreferenceSpec struct {
// +optional
Rebalance bool `json:"rebalance,omitempty"`
// If set to true, the placement of target kind will be determined using the intersection
// of RSP placement scheduling result and the clusterSelector (spec.placement.clusterSelector)
// specified on the target kind.
// If set to false or not defined, RSP placement scheduling result overwrites the clusters
// list in the spec.placement.clusters of the target resource.
// +optional
IntersectWithClusterSelector bool `json:"intersectWithClusterSelector"`
// A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in
// these clusters.
// "*" (if provided) applies to all clusters if an explicit mapping is not provided.

View File

@@ -20,28 +20,28 @@ import (
"context"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/kubefed/pkg/client/generic/scheme"
)
type Client interface {
Create(ctx context.Context, obj runtime.Object) error
Get(ctx context.Context, obj runtime.Object, namespace, name string) error
Update(ctx context.Context, obj runtime.Object) error
Delete(ctx context.Context, obj runtime.Object, namespace, name string) error
List(ctx context.Context, obj runtime.Object, namespace string, opts ...client.ListOption) error
UpdateStatus(ctx context.Context, obj runtime.Object) error
Create(ctx context.Context, obj runtimeclient.Object) error
Get(ctx context.Context, obj runtimeclient.Object, namespace, name string) error
Update(ctx context.Context, obj runtimeclient.Object) error
Delete(ctx context.Context, obj runtimeclient.Object, namespace, name string, opts ...runtimeclient.DeleteOption) error
List(ctx context.Context, obj runtimeclient.ObjectList, namespace string, opts ...runtimeclient.ListOption) error
UpdateStatus(ctx context.Context, obj runtimeclient.Object) error
Patch(ctx context.Context, obj runtimeclient.Object, patch runtimeclient.Patch, opts ...runtimeclient.PatchOption) error
}
type genericClient struct {
client client.Client
client runtimeclient.Client
}
func New(config *rest.Config) (Client, error) {
client, err := client.New(config, client.Options{Scheme: scheme.Scheme})
client, err := runtimeclient.New(config, runtimeclient.Options{Scheme: scheme.Scheme})
return &genericClient{client}, err
}
@@ -59,33 +59,37 @@ func NewForConfigOrDieWithUserAgent(config *rest.Config, userAgent string) Clien
return NewForConfigOrDie(configCopy)
}
func (c *genericClient) Create(ctx context.Context, obj runtime.Object) error {
func (c *genericClient) Create(ctx context.Context, obj runtimeclient.Object) error {
return c.client.Create(ctx, obj)
}
func (c *genericClient) Get(ctx context.Context, obj runtime.Object, namespace, name string) error {
return c.client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, obj)
func (c *genericClient) Get(ctx context.Context, obj runtimeclient.Object, namespace, name string) error {
return c.client.Get(ctx, runtimeclient.ObjectKey{Namespace: namespace, Name: name}, obj)
}
func (c *genericClient) Update(ctx context.Context, obj runtime.Object) error {
func (c *genericClient) Update(ctx context.Context, obj runtimeclient.Object) error {
return c.client.Update(ctx, obj)
}
func (c *genericClient) Delete(ctx context.Context, obj runtime.Object, namespace, name string) error {
func (c *genericClient) Delete(ctx context.Context, obj runtimeclient.Object, namespace, name string, opts ...runtimeclient.DeleteOption) error {
accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
accessor.SetNamespace(namespace)
accessor.SetName(name)
return c.client.Delete(ctx, obj)
return c.client.Delete(ctx, obj, opts...)
}
func (c *genericClient) List(ctx context.Context, obj runtime.Object, namespace string, opts ...client.ListOption) error {
opts = append(opts, client.InNamespace(namespace))
func (c *genericClient) List(ctx context.Context, obj runtimeclient.ObjectList, namespace string, opts ...runtimeclient.ListOption) error {
opts = append(opts, runtimeclient.InNamespace(namespace))
return c.client.List(ctx, obj, opts...)
}
func (c *genericClient) UpdateStatus(ctx context.Context, obj runtime.Object) error {
func (c *genericClient) UpdateStatus(ctx context.Context, obj runtimeclient.Object) error {
return c.client.Status().Update(ctx, obj)
}
func (c *genericClient) Patch(ctx context.Context, obj runtimeclient.Object, patch runtimeclient.Patch, opts ...runtimeclient.PatchOption) error {
return c.client.Patch(ctx, obj, patch, opts...)
}

View File

@@ -26,13 +26,13 @@ import (
"time"
"github.com/pkg/errors"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "k8s.io/api/core/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
"k8s.io/klog"
"k8s.io/klog/v2"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
"sigs.k8s.io/kubefed/pkg/client/generic"
@@ -41,10 +41,10 @@ import (
const (
DefaultKubeFedSystemNamespace = "kube-federation-system"
KubeAPIQPS = 20.0
KubeAPIBurst = 30
TokenKey = "token"
KubeAPIQPS = 20.0
KubeAPIBurst = 30
TokenKey = "token"
CaCrtKey = "ca.crt"
KubeFedConfigName = "kubefed"
)
@@ -84,6 +84,14 @@ func BuildClusterConfig(fedCluster *fedv1b1.KubeFedCluster, client generic.Clien
clusterConfig.QPS = KubeAPIQPS
clusterConfig.Burst = KubeAPIBurst
if fedCluster.Spec.ProxyURL != "" {
proxyURL, err := url.Parse(fedCluster.Spec.ProxyURL)
if err != nil {
return nil, errors.Errorf("Failed to parse provided proxy URL %s: %v", fedCluster.Spec.ProxyURL, err)
}
clusterConfig.Proxy = http.ProxyURL(proxyURL)
}
if len(fedCluster.Spec.DisabledTLSValidations) != 0 {
klog.V(1).Infof("Cluster %s will use a custom transport for TLS certificate validation", fedCluster.Name)
if err = CustomizeTLSTransport(fedCluster, clusterConfig); err != nil {
@@ -98,7 +106,7 @@ func BuildClusterConfig(fedCluster *fedv1b1.KubeFedCluster, client generic.Clien
// primary cluster by checking if the UIDs match for both ObjectMetas passed
// in.
// TODO (font): Need to revisit this when cluster ID is available.
func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
func IsPrimaryCluster(obj, clusterObj runtimeclient.Object) bool {
meta := MetaAccessor(obj)
clusterMeta := MetaAccessor(clusterObj)
return meta.GetUID() == clusterMeta.GetUID()

View File

@@ -68,12 +68,14 @@ type ClusterHealthCheckConfig struct {
// controllers.
type ControllerConfig struct {
KubeFedNamespaces
KubeConfig *restclient.Config
ClusterAvailableDelay time.Duration
ClusterUnavailableDelay time.Duration
MinimizeLatency bool
SkipAdoptingResources bool
RawResourceStatusCollection bool
KubeConfig *restclient.Config
ClusterAvailableDelay time.Duration
ClusterUnavailableDelay time.Duration
MinimizeLatency bool
MaxConcurrentSyncReconciles int64
MaxConcurrentStatusReconciles int64
SkipAdoptingResources bool
RawResourceStatusCollection bool
}
func (c *ControllerConfig) LimitedScope() bool {

View File

@@ -0,0 +1,73 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// DeleteOptionAnnotation contains options for delete
// while deleting resources for member clusters.
DeleteOptionAnnotation = "kubefed.io/deleteoption"
)
// GetDeleteOptions return delete options from the annotation
func GetDeleteOptions(obj *unstructured.Unstructured) ([]client.DeleteOption, error) {
options := make([]client.DeleteOption, 0)
annotations := obj.GetAnnotations()
if annotations == nil {
return options, nil
}
if optStr, ok := annotations[DeleteOptionAnnotation]; ok {
opt := &metav1.DeleteOptions{}
if err := json.Unmarshal([]byte(optStr), opt); err != nil {
return nil, errors.Wrapf(err, "could not deserialize delete options from annotation value '%s'", optStr)
}
clientOpt := &client.DeleteOptions{}
clientOpt.GracePeriodSeconds = opt.GracePeriodSeconds
clientOpt.PropagationPolicy = opt.PropagationPolicy
clientOpt.Preconditions = opt.Preconditions
options = append(options, clientOpt)
}
return options, nil
}
// ApplyDeleteOptions set the DeleteOptions on the annotation
func ApplyDeleteOptions(obj *unstructured.Unstructured, opts ...client.DeleteOption) error {
opt := client.DeleteOptions{}
opt.ApplyOptions(opts)
deleteOpts := opt.AsDeleteOptions()
optBytes, err := json.Marshal(deleteOpts)
if err != nil {
return errors.Wrapf(err, "could not serialize delete options from object '%v'", deleteOpts)
}
annotations := obj.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
annotations[DeleteOptionAnnotation] = string(optBytes)
obj.SetAnnotations(annotations)
return nil
}

View File

@@ -23,13 +23,13 @@ import (
"time"
"github.com/pkg/errors"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"k8s.io/klog/v2"
fedcommon "sigs.k8s.io/kubefed/pkg/apis/core/common"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
@@ -139,7 +139,7 @@ func NewFederatedInformer(
config *ControllerConfig,
client generic.Client,
apiResource *metav1.APIResource,
triggerFunc func(pkgruntime.Object),
triggerFunc func(runtimeclient.Object),
clusterLifecycle *ClusterLifecycleHandlerFuncs) (FederatedInformer, error) {
targetInformerFactory := func(cluster *fedv1b1.KubeFedCluster, clusterConfig *restclient.Config) (cache.Store, cache.Controller, error) {
resourceClient, err := NewResourceClient(clusterConfig, apiResource)

View File

@@ -21,10 +21,11 @@ import (
"time"
"github.com/pkg/errors"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
@@ -33,11 +34,11 @@ import (
"sigs.k8s.io/kubefed/pkg/client/generic/scheme"
)
func NewGenericInformer(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller, error) {
func NewGenericInformer(config *rest.Config, namespace string, obj runtimeclient.Object, resyncPeriod time.Duration, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller, error) {
return NewGenericInformerWithEventHandler(config, namespace, obj, resyncPeriod, NewTriggerOnAllChanges(triggerFunc))
}
func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, resourceEventHandlerFuncs *cache.ResourceEventHandlerFuncs) (cache.Store, cache.Controller, error) {
func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, obj runtimeclient.Object, resyncPeriod time.Duration, resourceEventHandlerFuncs *cache.ResourceEventHandlerFuncs) (cache.Store, cache.Controller, error) {
gvk, err := apiutil.GVKForObject(obj, scheme.Scheme)
if err != nil {
return nil, nil, err
@@ -53,7 +54,7 @@ func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, o
return nil, nil, err
}
client, err := apiutil.RESTClientForGVK(gvk, config, scheme.Codecs)
client, err := apiutil.RESTClientForGVK(gvk, false, config, scheme.Codecs)
if err != nil {
return nil, nil, err
}

View File

@@ -19,13 +19,13 @@ package util
import (
"reflect"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on all object changes.
func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
func NewTriggerOnAllChanges(triggerFunc func(runtimeclient.Object)) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok {
@@ -35,15 +35,15 @@ func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.Resource
return
}
}
oldObj := old.(pkgruntime.Object)
oldObj := old.(runtimeclient.Object)
triggerFunc(oldObj)
},
AddFunc: func(cur interface{}) {
curObj := cur.(pkgruntime.Object)
curObj := cur.(runtimeclient.Object)
triggerFunc(curObj)
},
UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkgruntime.Object)
curObj := cur.(runtimeclient.Object)
if !reflect.DeepEqual(old, cur) {
triggerFunc(curObj)
}

View File

@@ -21,11 +21,10 @@ import (
"reflect"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// Copies cluster-independent, user provided data from the given ObjectMeta struct. If in
@@ -105,7 +104,7 @@ func ObjectMetaObjEquivalent(a, b metav1.Object) bool {
// Checks if cluster-independent, user provided data in ObjectMeta and Spec in two given top
// level api objects are equivalent.
func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool {
func ObjectMetaAndSpecEquivalent(a, b runtimeclient.Object) bool {
objectMetaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)
objectMetaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)
specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface()
@@ -113,7 +112,7 @@ func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool {
return ObjectMetaEquivalent(objectMetaA, objectMetaB) && reflect.DeepEqual(specA, specB)
}
func MetaAccessor(obj runtime.Object) metav1.Object {
func MetaAccessor(obj runtimeclient.Object) metav1.Object {
accessor, err := meta.Accessor(obj)
if err != nil {
// This should always succeed if obj is not nil. Also,

View File

@@ -20,6 +20,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
)
type GenericClusterReference struct {
@@ -86,3 +89,96 @@ func SetClusterNames(obj *unstructured.Unstructured, clusterNames []string) erro
}
return unstructured.SetNestedSlice(obj.Object, clusters, SpecField, PlacementField, ClustersField)
}
func SetClusterSelector(obj *unstructured.Unstructured, clusterSelector map[string]string) error {
return unstructured.SetNestedStringMap(obj.Object, clusterSelector, SpecField, PlacementField, ClusterSelectorField, MatchLabelsField)
}
// ComputeNamespacedPlacement determines placement for namespaced
// federated resources (e.g. FederatedConfigMap).
//
// If KubeFed is deployed cluster-wide, placement is the intersection
// of the placement for the federated resource and the placement of
// the federated namespace containing the resource.
//
// If KubeFed is limited to a single namespace, placement is
// determined as the intersection of resource and namespace placement
// if namespace placement exists. If namespace placement does not
// exist, resource placement will be used verbatim. This is possible
// because the single namespace by definition must exist on member
// clusters, so namespace placement becomes a mechanism for limiting
// rather than allowing propagation.
func ComputeNamespacedPlacement(resource, namespace *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, limitedScope bool, selectorOnly bool) (selectedClusters sets.String, err error) {
resourceClusters, err := ComputePlacement(resource, clusters, selectorOnly)
if err != nil {
return nil, err
}
if namespace == nil {
if limitedScope {
// Use the resource placement verbatim if no federated
// namespace is present and KubeFed is targeting a
// single namespace.
return resourceClusters, nil
}
// Resource should not exist in any member clusters.
return sets.String{}, nil
}
namespaceClusters, err := ComputePlacement(namespace, clusters, selectorOnly)
if err != nil {
return nil, err
}
// If both namespace and resource placement exist, the desired
// list of clusters is their intersection.
return resourceClusters.Intersection(namespaceClusters), nil
}
// ComputePlacement determines the selected clusters for a federated
// resource.
func ComputePlacement(resource *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, selectorOnly bool) (selectedClusters sets.String, err error) {
selectedNames, err := selectedClusterNames(resource, clusters, selectorOnly)
if err != nil {
return nil, err
}
clusterNames := getClusterNames(clusters)
return clusterNames.Intersection(selectedNames), nil
}
func selectedClusterNames(resource *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, selectorOnly bool) (sets.String, error) {
placement, err := UnmarshalGenericPlacement(resource)
if err != nil {
return nil, err
}
selectedNames := sets.String{}
clusterNames := placement.ClusterNames()
// Only use selector if clusters are nil. An empty list of
// clusters implies no clusters are selected.
if selectorOnly || clusterNames == nil {
selector, err := placement.ClusterSelector()
if err != nil {
return nil, err
}
for _, cluster := range clusters {
if selector.Matches(labels.Set(cluster.Labels)) {
selectedNames.Insert(cluster.Name)
}
}
} else {
for _, clusterName := range clusterNames {
selectedNames.Insert(clusterName)
}
}
return selectedNames, nil
}
func getClusterNames(clusters []*fedv1b1.KubeFedCluster) sets.String {
clusterNames := sets.String{}
for _, cluster := range clusters {
clusterNames.Insert(cluster.Name)
}
return clusterNames
}

View File

@@ -19,8 +19,8 @@ package util
import (
"fmt"
meta "k8s.io/apimachinery/pkg/api/meta"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/api/meta"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// QualifiedName comprises a resource name with an optional namespace.
@@ -35,7 +35,7 @@ type QualifiedName struct {
Name string
}
func NewQualifiedName(obj pkgruntime.Object) QualifiedName {
func NewQualifiedName(obj runtimeclient.Object) QualifiedName {
accessor, err := meta.Accessor(obj)
if err != nil {
// TODO(marun) This should never happen, but if it does, the

View File

@@ -20,6 +20,7 @@ import (
"context"
"github.com/pkg/errors"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -32,18 +33,18 @@ import (
)
// NewResourceInformer returns an unfiltered informer.
func NewResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) {
func NewResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller) {
return newResourceInformer(client, namespace, apiResource, triggerFunc, "")
}
// NewManagedResourceInformer returns an informer limited to resources
// managed by KubeFed as indicated by labeling.
func NewManagedResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) {
func NewManagedResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller) {
labelSelector := labels.Set(map[string]string{ManagedByKubeFedLabelKey: ManagedByKubeFedLabelValue}).AsSelector().String()
return newResourceInformer(client, namespace, apiResource, triggerFunc, labelSelector)
}
func newResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object), labelSelector string) (cache.Store, cache.Controller) {
func newResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object), labelSelector string) (cache.Store, cache.Controller) {
obj := &unstructured.Unstructured{}
if apiResource != nil {
@@ -78,7 +79,7 @@ func ObjFromCache(store cache.Store, kind, key string) (*unstructured.Unstructur
return obj.(*unstructured.Unstructured), nil
}
func rawObjFromCache(store cache.Store, kind, key string) (pkgruntime.Object, error) {
func rawObjFromCache(store cache.Store, kind, key string) (runtimeclient.Object, error) {
cachedObj, exist, err := store.GetByKey(key)
if err != nil {
wrappedErr := errors.Wrapf(err, "Failed to query %s store for %q", kind, key)
@@ -88,5 +89,5 @@ func rawObjFromCache(store cache.Store, kind, key string) (pkgruntime.Object, er
if !exist {
return nil, nil
}
return cachedObj.(pkgruntime.Object).DeepCopyObject(), nil
return cachedObj.(runtimeclient.Object).DeepCopyObject().(runtimeclient.Object), nil
}

View File

@@ -19,10 +19,10 @@ package util
import (
"time"
pkgruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/client-go/util/workqueue"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)
type ReconcileFunc func(qualifiedName QualifiedName) ReconciliationStatus
@@ -32,12 +32,19 @@ type ReconcileWorker interface {
EnqueueForClusterSync(qualifiedName QualifiedName)
EnqueueForError(qualifiedName QualifiedName)
EnqueueForRetry(qualifiedName QualifiedName)
EnqueueObject(obj pkgruntime.Object)
EnqueueObject(obj runtimeclient.Object)
EnqueueWithDelay(qualifiedName QualifiedName, delay time.Duration)
Run(stopChan <-chan struct{})
SetDelay(retryDelay, clusterSyncDelay time.Duration)
}
type WorkerOptions struct {
WorkerTiming
// MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
MaxConcurrentReconciles int
}
type WorkerTiming struct {
Interval time.Duration
RetryDelay time.Duration
@@ -47,10 +54,14 @@ type WorkerTiming struct {
}
type asyncWorker struct {
name string
reconcile ReconcileFunc
timing WorkerTiming
maxConcurrentReconciles int
// For triggering reconciliation of a single resource. This is
// used when there is an add/update/delete operation on a resource
// in either the API of the cluster hosting KubeFed or in the API
@@ -64,25 +75,30 @@ type asyncWorker struct {
backoff *flowcontrol.Backoff
}
func NewReconcileWorker(reconcile ReconcileFunc, timing WorkerTiming) ReconcileWorker {
if timing.Interval == 0 {
timing.Interval = time.Second * 1
func NewReconcileWorker(name string, reconcile ReconcileFunc, options WorkerOptions) ReconcileWorker {
if options.Interval == 0 {
options.Interval = time.Second * 1
}
if timing.RetryDelay == 0 {
timing.RetryDelay = time.Second * 10
if options.RetryDelay == 0 {
options.RetryDelay = time.Second * 10
}
if timing.InitialBackoff == 0 {
timing.InitialBackoff = time.Second * 5
if options.InitialBackoff == 0 {
options.InitialBackoff = time.Second * 5
}
if timing.MaxBackoff == 0 {
timing.MaxBackoff = time.Minute
if options.MaxBackoff == 0 {
options.MaxBackoff = time.Minute
}
if options.MaxConcurrentReconciles == 0 {
options.MaxConcurrentReconciles = 1
}
return &asyncWorker{
reconcile: reconcile,
timing: timing,
deliverer: NewDelayingDeliverer(),
queue: workqueue.New(),
backoff: flowcontrol.NewBackOff(timing.InitialBackoff, timing.MaxBackoff),
name: name,
reconcile: reconcile,
timing: options.WorkerTiming,
maxConcurrentReconciles: options.MaxConcurrentReconciles,
deliverer: NewDelayingDeliverer(),
queue: workqueue.NewNamed(name),
backoff: flowcontrol.NewBackOff(options.InitialBackoff, options.MaxBackoff),
}
}
@@ -102,7 +118,7 @@ func (w *asyncWorker) EnqueueForClusterSync(qualifiedName QualifiedName) {
w.deliver(qualifiedName, w.timing.ClusterSyncDelay, false)
}
func (w *asyncWorker) EnqueueObject(obj pkgruntime.Object) {
func (w *asyncWorker) EnqueueObject(obj runtimeclient.Object) {
qualifiedName := NewQualifiedName(obj)
w.Enqueue(qualifiedName)
}
@@ -114,9 +130,15 @@ func (w *asyncWorker) EnqueueWithDelay(qualifiedName QualifiedName, delay time.D
func (w *asyncWorker) Run(stopChan <-chan struct{}) {
StartBackoffGC(w.backoff, stopChan)
w.deliverer.StartWithHandler(func(item *DelayingDelivererItem) {
w.queue.Add(item)
qualifiedName, ok := item.Value.(*QualifiedName)
if ok {
w.queue.Add(*qualifiedName)
}
})
go wait.Until(w.worker, w.timing.Interval, stopChan)
for i := 0; i < w.maxConcurrentReconciles; i++ {
go wait.Until(w.worker, w.timing.Interval, stopChan)
}
// Ensure all goroutines are cleaned up when the stop channel closes
go func() {
@@ -145,26 +167,32 @@ func (w *asyncWorker) deliver(qualifiedName QualifiedName, delay time.Duration,
}
func (w *asyncWorker) worker() {
for {
obj, quit := w.queue.Get()
if quit {
return
}
item := obj.(*DelayingDelivererItem)
qualifiedName := item.Value.(*QualifiedName)
status := w.reconcile(*qualifiedName)
w.queue.Done(item)
switch status {
case StatusAllOK:
break
case StatusError:
w.EnqueueForError(*qualifiedName)
case StatusNeedsRecheck:
w.EnqueueForRetry(*qualifiedName)
case StatusNotSynced:
w.EnqueueForClusterSync(*qualifiedName)
}
for w.reconcileOnce() {
}
}
func (w *asyncWorker) reconcileOnce() bool {
obj, quit := w.queue.Get()
if quit {
return false
}
defer w.queue.Done(obj)
qualifiedName, ok := obj.(QualifiedName)
if !ok {
return true
}
status := w.reconcile(qualifiedName)
switch status {
case StatusAllOK:
break
case StatusError:
w.EnqueueForError(qualifiedName)
case StatusNeedsRecheck:
w.EnqueueForRetry(qualifiedName)
case StatusNotSynced:
w.EnqueueForClusterSync(qualifiedName)
}
return true
}

View File

@@ -20,7 +20,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)