vendor/sigs.k8s.io/kubefed/pkg/controller/util/cluster_util.go (22 changes; generated, vendored)

@@ -26,13 +26,13 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	apiv1 "k8s.io/api/core/v1"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/transport"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
 	"sigs.k8s.io/kubefed/pkg/client/generic"
@@ -41,10 +41,10 @@ import (
 const (
 	DefaultKubeFedSystemNamespace = "kube-federation-system"
 
-	KubeAPIQPS   = 20.0
-	KubeAPIBurst = 30
-	TokenKey     = "token"
+	KubeAPIQPS        = 20.0
+	KubeAPIBurst      = 30
+	TokenKey          = "token"
 	CaCrtKey          = "ca.crt"
 	KubeFedConfigName = "kubefed"
 )
@@ -84,6 +84,14 @@ func BuildClusterConfig(fedCluster *fedv1b1.KubeFedCluster, client generic.Clien
 	clusterConfig.QPS = KubeAPIQPS
 	clusterConfig.Burst = KubeAPIBurst
 
+	if fedCluster.Spec.ProxyURL != "" {
+		proxyURL, err := url.Parse(fedCluster.Spec.ProxyURL)
+		if err != nil {
+			return nil, errors.Errorf("Failed to parse provided proxy URL %s: %v", fedCluster.Spec.ProxyURL, err)
+		}
+		clusterConfig.Proxy = http.ProxyURL(proxyURL)
+	}
+
 	if len(fedCluster.Spec.DisabledTLSValidations) != 0 {
 		klog.V(1).Infof("Cluster %s will use a custom transport for TLS certificate validation", fedCluster.Name)
 		if err = CustomizeTLSTransport(fedCluster, clusterConfig); err != nil {
@@ -98,7 +106,7 @@ func BuildClusterConfig(fedCluster *fedv1b1.KubeFedCluster, client generic.Clien
 // primary cluster by checking if the UIDs match for both ObjectMetas passed
 // in.
 // TODO (font): Need to revisit this when cluster ID is available.
-func IsPrimaryCluster(obj, clusterObj pkgruntime.Object) bool {
+func IsPrimaryCluster(obj, clusterObj runtimeclient.Object) bool {
 	meta := MetaAccessor(obj)
 	clusterMeta := MetaAccessor(clusterObj)
 	return meta.GetUID() == clusterMeta.GetUID()
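The only functional change in BuildClusterConfig above is the new ProxyURL handling; the rest is the pkgruntime.Object to runtimeclient.Object migration that recurs through this commit. A minimal standalone sketch of the same proxy wiring (the withProxy helper is hypothetical, not part of the diff): rest.Config.Proxy accepts the func(*http.Request) (*url.URL, error) that http.ProxyURL returns, and client-go installs it on the transport of every client built from that config.

```go
package main

import (
	"net/http"
	"net/url"

	"github.com/pkg/errors"
	restclient "k8s.io/client-go/rest"
)

// withProxy mirrors the BuildClusterConfig change: parse the spec's proxy
// URL once, then route all requests from clients built on cfg through it.
func withProxy(cfg *restclient.Config, rawURL string) (*restclient.Config, error) {
	if rawURL == "" {
		return cfg, nil // no ProxyURL set on the KubeFedCluster spec
	}
	proxyURL, err := url.Parse(rawURL)
	if err != nil {
		return nil, errors.Errorf("failed to parse proxy URL %s: %v", rawURL, err)
	}
	cfg.Proxy = http.ProxyURL(proxyURL)
	return cfg, nil
}

func main() {
	cfg := &restclient.Config{Host: "https://member-cluster.example.com"}
	if _, err := withProxy(cfg, "http://proxy.example.com:3128"); err != nil {
		panic(err)
	}
}
```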
vendor/sigs.k8s.io/kubefed/pkg/controller/util/controllerconfig.go (14 changes; generated, vendored)

@@ -68,12 +68,14 @@ type ClusterHealthCheckConfig struct {
 // controllers.
 type ControllerConfig struct {
 	KubeFedNamespaces
-	KubeConfig                  *restclient.Config
-	ClusterAvailableDelay       time.Duration
-	ClusterUnavailableDelay     time.Duration
-	MinimizeLatency             bool
-	SkipAdoptingResources       bool
-	RawResourceStatusCollection bool
+	KubeConfig                    *restclient.Config
+	ClusterAvailableDelay         time.Duration
+	ClusterUnavailableDelay       time.Duration
+	MinimizeLatency               bool
+	MaxConcurrentSyncReconciles   int64
+	MaxConcurrentStatusReconciles int64
+	SkipAdoptingResources         bool
+	RawResourceStatusCollection   bool
 }
 
 func (c *ControllerConfig) LimitedScope() bool {
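The two new MaxConcurrent* fields appear to feed the MaxConcurrentReconciles knob on WorkerOptions introduced in worker.go below, one limit each for the sync and status controllers; see the construction sketch after that diff.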
vendor/sigs.k8s.io/kubefed/pkg/controller/util/deletionannotation.go (73 changes; generated, vendored; new file)

@@ -0,0 +1,73 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"encoding/json"
+
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	// DeleteOptionAnnotation contains options for delete
+	// while deleting resources for member clusters.
+	DeleteOptionAnnotation = "kubefed.io/deleteoption"
+)
+
+// GetDeleteOptions return delete options from the annotation
+func GetDeleteOptions(obj *unstructured.Unstructured) ([]client.DeleteOption, error) {
+	options := make([]client.DeleteOption, 0)
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
+		return options, nil
+	}
+
+	if optStr, ok := annotations[DeleteOptionAnnotation]; ok {
+		opt := &metav1.DeleteOptions{}
+		if err := json.Unmarshal([]byte(optStr), opt); err != nil {
+			return nil, errors.Wrapf(err, "could not deserialize delete options from annotation value '%s'", optStr)
+		}
+		clientOpt := &client.DeleteOptions{}
+		clientOpt.GracePeriodSeconds = opt.GracePeriodSeconds
+		clientOpt.PropagationPolicy = opt.PropagationPolicy
+		clientOpt.Preconditions = opt.Preconditions
+		options = append(options, clientOpt)
+	}
+	return options, nil
+}
+
+// ApplyDeleteOptions set the DeleteOptions on the annotation
+func ApplyDeleteOptions(obj *unstructured.Unstructured, opts ...client.DeleteOption) error {
+	opt := client.DeleteOptions{}
+	opt.ApplyOptions(opts)
+	deleteOpts := opt.AsDeleteOptions()
+	optBytes, err := json.Marshal(deleteOpts)
+	if err != nil {
+		return errors.Wrapf(err, "could not serialize delete options from object '%v'", deleteOpts)
+	}
+
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+	annotations[DeleteOptionAnnotation] = string(optBytes)
+	obj.SetAnnotations(annotations)
+	return nil
+}
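A short usage sketch for the two new helpers: ApplyDeleteOptions serializes options onto the federated object's annotation, and GetDeleteOptions recovers them when the resource is deleted from member clusters. The round-trip below is illustrative; the util import path assumes the kubefed module rather than this vendored copy.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/kubefed/pkg/controller/util"
)

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{}}

	// Stash delete options on the object as a JSON annotation value.
	err := util.ApplyDeleteOptions(obj,
		client.GracePeriodSeconds(5),
		client.PropagationPolicy(metav1.DeletePropagationOrphan))
	if err != nil {
		panic(err)
	}
	// e.g. {"gracePeriodSeconds":5,"propagationPolicy":"Orphan"}
	fmt.Println(obj.GetAnnotations()[util.DeleteOptionAnnotation])

	// Recover them later as controller-runtime delete options.
	opts, err := util.GetDeleteOptions(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(opts)) // 1: a *client.DeleteOptions carrying both fields
}
```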
vendor/sigs.k8s.io/kubefed/pkg/controller/util/federated_informer.go (6 changes; generated, vendored)

@@ -23,13 +23,13 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 
 	fedcommon "sigs.k8s.io/kubefed/pkg/apis/core/common"
 	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
@@ -139,7 +139,7 @@ func NewFederatedInformer(
 	config *ControllerConfig,
 	client generic.Client,
 	apiResource *metav1.APIResource,
-	triggerFunc func(pkgruntime.Object),
+	triggerFunc func(runtimeclient.Object),
 	clusterLifecycle *ClusterLifecycleHandlerFuncs) (FederatedInformer, error) {
 	targetInformerFactory := func(cluster *fedv1b1.KubeFedCluster, clusterConfig *restclient.Config) (cache.Store, cache.Controller, error) {
 		resourceClient, err := NewResourceClient(clusterConfig, apiResource)
vendor/sigs.k8s.io/kubefed/pkg/controller/util/genericinformer.go (9 changes; generated, vendored)

@@ -21,10 +21,11 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	pkgruntime "k8s.io/apimachinery/pkg/runtime"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/cache"
@@ -33,11 +34,11 @@ import (
 	"sigs.k8s.io/kubefed/pkg/client/generic/scheme"
 )
 
-func NewGenericInformer(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller, error) {
+func NewGenericInformer(config *rest.Config, namespace string, obj runtimeclient.Object, resyncPeriod time.Duration, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller, error) {
 	return NewGenericInformerWithEventHandler(config, namespace, obj, resyncPeriod, NewTriggerOnAllChanges(triggerFunc))
 }
 
-func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, obj pkgruntime.Object, resyncPeriod time.Duration, resourceEventHandlerFuncs *cache.ResourceEventHandlerFuncs) (cache.Store, cache.Controller, error) {
+func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, obj runtimeclient.Object, resyncPeriod time.Duration, resourceEventHandlerFuncs *cache.ResourceEventHandlerFuncs) (cache.Store, cache.Controller, error) {
 	gvk, err := apiutil.GVKForObject(obj, scheme.Scheme)
 	if err != nil {
 		return nil, nil, err
@@ -53,7 +54,7 @@ func NewGenericInformerWithEventHandler(config *rest.Config, namespace string, o
 		return nil, nil, err
 	}
 
-	client, err := apiutil.RESTClientForGVK(gvk, config, scheme.Codecs)
+	client, err := apiutil.RESTClientForGVK(gvk, false, config, scheme.Codecs)
 	if err != nil {
 		return nil, nil, err
 	}
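Note: the new false argument tracks a signature change in controller-runtime, where apiutil.RESTClientForGVK gained an isUnstructured parameter; passing false preserves the structured-codec behavior of the old call.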
vendor/sigs.k8s.io/kubefed/pkg/controller/util/handlers.go (10 changes; generated, vendored)

@@ -19,13 +19,13 @@ package util
 import (
 	"reflect"
 
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/cache"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // Returns cache.ResourceEventHandlerFuncs that trigger the given function
 // on all object changes.
-func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
+func NewTriggerOnAllChanges(triggerFunc func(runtimeclient.Object)) *cache.ResourceEventHandlerFuncs {
 	return &cache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(old interface{}) {
 			if deleted, ok := old.(cache.DeletedFinalStateUnknown); ok {
@@ -35,15 +35,15 @@ func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.Resource
 					return
 				}
 			}
-			oldObj := old.(pkgruntime.Object)
+			oldObj := old.(runtimeclient.Object)
 			triggerFunc(oldObj)
 		},
 		AddFunc: func(cur interface{}) {
-			curObj := cur.(pkgruntime.Object)
+			curObj := cur.(runtimeclient.Object)
 			triggerFunc(curObj)
 		},
 		UpdateFunc: func(old, cur interface{}) {
-			curObj := cur.(pkgruntime.Object)
+			curObj := cur.(runtimeclient.Object)
 			if !reflect.DeepEqual(old, cur) {
 				triggerFunc(curObj)
 			}
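The recurring pkgruntime.Object to runtimeclient.Object change in this commit swaps runtime.Object for controller-runtime's client.Object, which embeds both runtime.Object and metav1.Object. A small sketch of why that simplifies the trigger functions: metadata accessors are available directly, without a meta.Accessor call, so casts like the ones above give callers everything they need.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// describe accepts any client.Object: metadata accessors (GetName,
// GetNamespace, GetUID, ...) come from metav1.Object, while DeepCopyObject
// and GetObjectKind come from runtime.Object.
func describe(obj runtimeclient.Object) {
	fmt.Printf("%s/%s uid=%s\n", obj.GetNamespace(), obj.GetName(), obj.GetUID())
}

func main() {
	describe(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "demo"},
	})
}
```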
vendor/sigs.k8s.io/kubefed/pkg/controller/util/meta.go (7 changes; generated, vendored)

@@ -21,11 +21,10 @@ import (
 	"reflect"
 
 	"github.com/pkg/errors"
-
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // Copies cluster-independent, user provided data from the given ObjectMeta struct. If in
@@ -105,7 +104,7 @@ func ObjectMetaObjEquivalent(a, b metav1.Object) bool {
 
 // Checks if cluster-independent, user provided data in ObjectMeta and Spec in two given top
 // level api objects are equivalent.
-func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool {
+func ObjectMetaAndSpecEquivalent(a, b runtimeclient.Object) bool {
 	objectMetaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)
 	objectMetaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)
 	specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface()
@@ -113,7 +112,7 @@ func ObjectMetaAndSpecEquivalent(a, b runtime.Object) bool {
 	return ObjectMetaEquivalent(objectMetaA, objectMetaB) && reflect.DeepEqual(specA, specB)
 }
 
-func MetaAccessor(obj runtime.Object) metav1.Object {
+func MetaAccessor(obj runtimeclient.Object) metav1.Object {
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
 		// This should always succeed if obj is not nil. Also,
vendor/sigs.k8s.io/kubefed/pkg/controller/util/placement.go (96 changes; generated, vendored)

@@ -20,6 +20,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
 )
 
 type GenericClusterReference struct {
@@ -86,3 +89,96 @@ func SetClusterNames(obj *unstructured.Unstructured, clusterNames []string) erro
 	}
 	return unstructured.SetNestedSlice(obj.Object, clusters, SpecField, PlacementField, ClustersField)
 }
+
+func SetClusterSelector(obj *unstructured.Unstructured, clusterSelector map[string]string) error {
+	return unstructured.SetNestedStringMap(obj.Object, clusterSelector, SpecField, PlacementField, ClusterSelectorField, MatchLabelsField)
+}
+
+// ComputeNamespacedPlacement determines placement for namespaced
+// federated resources (e.g. FederatedConfigMap).
+//
+// If KubeFed is deployed cluster-wide, placement is the intersection
+// of the placement for the federated resource and the placement of
+// the federated namespace containing the resource.
+//
+// If KubeFed is limited to a single namespace, placement is
+// determined as the intersection of resource and namespace placement
+// if namespace placement exists. If namespace placement does not
+// exist, resource placement will be used verbatim. This is possible
+// because the single namespace by definition must exist on member
+// clusters, so namespace placement becomes a mechanism for limiting
+// rather than allowing propagation.
+func ComputeNamespacedPlacement(resource, namespace *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, limitedScope bool, selectorOnly bool) (selectedClusters sets.String, err error) {
+	resourceClusters, err := ComputePlacement(resource, clusters, selectorOnly)
+	if err != nil {
+		return nil, err
+	}
+
+	if namespace == nil {
+		if limitedScope {
+			// Use the resource placement verbatim if no federated
+			// namespace is present and KubeFed is targeting a
+			// single namespace.
+			return resourceClusters, nil
+		}
+		// Resource should not exist in any member clusters.
+		return sets.String{}, nil
+	}
+
+	namespaceClusters, err := ComputePlacement(namespace, clusters, selectorOnly)
+	if err != nil {
+		return nil, err
+	}
+
+	// If both namespace and resource placement exist, the desired
+	// list of clusters is their intersection.
+	return resourceClusters.Intersection(namespaceClusters), nil
+}
+
+// ComputePlacement determines the selected clusters for a federated
+// resource.
+func ComputePlacement(resource *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, selectorOnly bool) (selectedClusters sets.String, err error) {
+	selectedNames, err := selectedClusterNames(resource, clusters, selectorOnly)
+	if err != nil {
+		return nil, err
+	}
+	clusterNames := getClusterNames(clusters)
+	return clusterNames.Intersection(selectedNames), nil
+}
+
+func selectedClusterNames(resource *unstructured.Unstructured, clusters []*fedv1b1.KubeFedCluster, selectorOnly bool) (sets.String, error) {
+	placement, err := UnmarshalGenericPlacement(resource)
+	if err != nil {
+		return nil, err
+	}
+
+	selectedNames := sets.String{}
+	clusterNames := placement.ClusterNames()
+	// Only use selector if clusters are nil. An empty list of
+	// clusters implies no clusters are selected.
+	if selectorOnly || clusterNames == nil {
+		selector, err := placement.ClusterSelector()
+		if err != nil {
+			return nil, err
+		}
+		for _, cluster := range clusters {
+			if selector.Matches(labels.Set(cluster.Labels)) {
+				selectedNames.Insert(cluster.Name)
+			}
+		}
+	} else {
+		for _, clusterName := range clusterNames {
+			selectedNames.Insert(clusterName)
+		}
+	}
+
+	return selectedNames, nil
+}
+
+func getClusterNames(clusters []*fedv1b1.KubeFedCluster) sets.String {
+	clusterNames := sets.String{}
+	for _, cluster := range clusters {
+		clusterNames.Insert(cluster.Name)
+	}
+	return clusterNames
+}
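The placement helpers above always intersect the requested clusters with the clusters actually registered, so stale entries in spec.placement.clusters are dropped silently rather than causing errors. A worked sketch of that set arithmetic (the cluster names are made up):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Clusters currently joined to the KubeFed control plane.
	clusterNames := sets.NewString("us-east", "us-west", "eu-1")

	// Clusters named by the resource's placement; "ap-1" no longer exists.
	selectedNames := sets.NewString("us-west", "ap-1")

	// ComputePlacement returns the intersection: [us-west].
	fmt.Println(clusterNames.Intersection(selectedNames).List())

	// For namespaced resources, ComputeNamespacedPlacement additionally
	// intersects with the federated namespace's own placement.
	namespaceClusters := sets.NewString("us-east", "us-west")
	fmt.Println(clusterNames.Intersection(selectedNames).
		Intersection(namespaceClusters).List())
}
```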
vendor/sigs.k8s.io/kubefed/pkg/controller/util/qualifiedname.go (6 changes; generated, vendored)

@@ -19,8 +19,8 @@ package util
 import (
 	"fmt"
 
-	meta "k8s.io/apimachinery/pkg/api/meta"
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/api/meta"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // QualifiedName comprises a resource name with an optional namespace.
@@ -35,7 +35,7 @@ type QualifiedName struct {
 	Name string
 }
 
-func NewQualifiedName(obj pkgruntime.Object) QualifiedName {
+func NewQualifiedName(obj runtimeclient.Object) QualifiedName {
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
 		// TODO(marun) This should never happen, but if it does, the
vendor/sigs.k8s.io/kubefed/pkg/controller/util/resourceinformer.go (11 changes; generated, vendored)

@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -32,18 +33,18 @@ import (
 )
 
 // NewResourceInformer returns an unfiltered informer.
-func NewResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) {
+func NewResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller) {
 	return newResourceInformer(client, namespace, apiResource, triggerFunc, "")
 }
 
 // NewManagedResourceInformer returns an informer limited to resources
 // managed by KubeFed as indicated by labeling.
-func NewManagedResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object)) (cache.Store, cache.Controller) {
+func NewManagedResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object)) (cache.Store, cache.Controller) {
 	labelSelector := labels.Set(map[string]string{ManagedByKubeFedLabelKey: ManagedByKubeFedLabelValue}).AsSelector().String()
 	return newResourceInformer(client, namespace, apiResource, triggerFunc, labelSelector)
 }
 
-func newResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(pkgruntime.Object), labelSelector string) (cache.Store, cache.Controller) {
+func newResourceInformer(client ResourceClient, namespace string, apiResource *metav1.APIResource, triggerFunc func(runtimeclient.Object), labelSelector string) (cache.Store, cache.Controller) {
 	obj := &unstructured.Unstructured{}
 
 	if apiResource != nil {
@@ -78,7 +79,7 @@ func ObjFromCache(store cache.Store, kind, key string) (*unstructured.Unstructur
 	return obj.(*unstructured.Unstructured), nil
 }
 
-func rawObjFromCache(store cache.Store, kind, key string) (pkgruntime.Object, error) {
+func rawObjFromCache(store cache.Store, kind, key string) (runtimeclient.Object, error) {
 	cachedObj, exist, err := store.GetByKey(key)
 	if err != nil {
 		wrappedErr := errors.Wrapf(err, "Failed to query %s store for %q", kind, key)
@@ -88,5 +89,5 @@ func rawObjFromCache(store cache.Store, kind, key string) (pkgruntime.Object, er
 	if !exist {
 		return nil, nil
 	}
-	return cachedObj.(pkgruntime.Object).DeepCopyObject(), nil
+	return cachedObj.(runtimeclient.Object).DeepCopyObject().(runtimeclient.Object), nil
 }
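Note: the double type assertion in rawObjFromCache is required because DeepCopyObject is declared on runtime.Object and returns runtime.Object; asserting the copy back to runtimeclient.Object recovers the metadata-bearing client.Object interface for callers.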
vendor/sigs.k8s.io/kubefed/pkg/controller/util/worker.go (108 changes; generated, vendored)

@@ -19,10 +19,10 @@ package util
 import (
 	"time"
 
-	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
+	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 type ReconcileFunc func(qualifiedName QualifiedName) ReconciliationStatus
@@ -32,12 +32,19 @@ type ReconcileWorker interface {
 	EnqueueForClusterSync(qualifiedName QualifiedName)
 	EnqueueForError(qualifiedName QualifiedName)
 	EnqueueForRetry(qualifiedName QualifiedName)
-	EnqueueObject(obj pkgruntime.Object)
+	EnqueueObject(obj runtimeclient.Object)
 	EnqueueWithDelay(qualifiedName QualifiedName, delay time.Duration)
 	Run(stopChan <-chan struct{})
 	SetDelay(retryDelay, clusterSyncDelay time.Duration)
 }
 
+type WorkerOptions struct {
+	WorkerTiming
+
+	// MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
+	MaxConcurrentReconciles int
+}
+
 type WorkerTiming struct {
 	Interval   time.Duration
 	RetryDelay time.Duration
@@ -47,10 +54,14 @@ type WorkerTiming struct {
 }
 
 type asyncWorker struct {
+	name string
+
 	reconcile ReconcileFunc
 
 	timing WorkerTiming
 
+	maxConcurrentReconciles int
+
 	// For triggering reconciliation of a single resource. This is
 	// used when there is an add/update/delete operation on a resource
 	// in either the API of the cluster hosting KubeFed or in the API
@@ -64,25 +75,30 @@ type asyncWorker struct {
 	backoff *flowcontrol.Backoff
 }
 
-func NewReconcileWorker(reconcile ReconcileFunc, timing WorkerTiming) ReconcileWorker {
-	if timing.Interval == 0 {
-		timing.Interval = time.Second * 1
+func NewReconcileWorker(name string, reconcile ReconcileFunc, options WorkerOptions) ReconcileWorker {
+	if options.Interval == 0 {
+		options.Interval = time.Second * 1
 	}
-	if timing.RetryDelay == 0 {
-		timing.RetryDelay = time.Second * 10
+	if options.RetryDelay == 0 {
+		options.RetryDelay = time.Second * 10
 	}
-	if timing.InitialBackoff == 0 {
-		timing.InitialBackoff = time.Second * 5
+	if options.InitialBackoff == 0 {
+		options.InitialBackoff = time.Second * 5
 	}
-	if timing.MaxBackoff == 0 {
-		timing.MaxBackoff = time.Minute
+	if options.MaxBackoff == 0 {
+		options.MaxBackoff = time.Minute
 	}
+	if options.MaxConcurrentReconciles == 0 {
+		options.MaxConcurrentReconciles = 1
+	}
 	return &asyncWorker{
-		reconcile: reconcile,
-		timing:    timing,
-		deliverer: NewDelayingDeliverer(),
-		queue:     workqueue.New(),
-		backoff:   flowcontrol.NewBackOff(timing.InitialBackoff, timing.MaxBackoff),
+		name:                    name,
+		reconcile:               reconcile,
+		timing:                  options.WorkerTiming,
+		maxConcurrentReconciles: options.MaxConcurrentReconciles,
+		deliverer:               NewDelayingDeliverer(),
+		queue:                   workqueue.NewNamed(name),
+		backoff:                 flowcontrol.NewBackOff(options.InitialBackoff, options.MaxBackoff),
 	}
 }
 
@@ -102,7 +118,7 @@ func (w *asyncWorker) EnqueueForClusterSync(qualifiedName QualifiedName) {
 	w.deliver(qualifiedName, w.timing.ClusterSyncDelay, false)
 }
 
-func (w *asyncWorker) EnqueueObject(obj pkgruntime.Object) {
+func (w *asyncWorker) EnqueueObject(obj runtimeclient.Object) {
 	qualifiedName := NewQualifiedName(obj)
 	w.Enqueue(qualifiedName)
 }
@@ -114,9 +130,15 @@ func (w *asyncWorker) EnqueueWithDelay(qualifiedName QualifiedName, delay time.D
 func (w *asyncWorker) Run(stopChan <-chan struct{}) {
 	StartBackoffGC(w.backoff, stopChan)
 	w.deliverer.StartWithHandler(func(item *DelayingDelivererItem) {
-		w.queue.Add(item)
+		qualifiedName, ok := item.Value.(*QualifiedName)
+		if ok {
+			w.queue.Add(*qualifiedName)
+		}
 	})
-	go wait.Until(w.worker, w.timing.Interval, stopChan)
+
+	for i := 0; i < w.maxConcurrentReconciles; i++ {
+		go wait.Until(w.worker, w.timing.Interval, stopChan)
+	}
 
 	// Ensure all goroutines are cleaned up when the stop channel closes
 	go func() {
@@ -145,26 +167,32 @@ func (w *asyncWorker) deliver(qualifiedName QualifiedName, delay time.Duration,
 }
 
 func (w *asyncWorker) worker() {
-	for {
-		obj, quit := w.queue.Get()
-		if quit {
-			return
-		}
-
-		item := obj.(*DelayingDelivererItem)
-		qualifiedName := item.Value.(*QualifiedName)
-		status := w.reconcile(*qualifiedName)
-		w.queue.Done(item)
-
-		switch status {
-		case StatusAllOK:
-			break
-		case StatusError:
-			w.EnqueueForError(*qualifiedName)
-		case StatusNeedsRecheck:
-			w.EnqueueForRetry(*qualifiedName)
-		case StatusNotSynced:
-			w.EnqueueForClusterSync(*qualifiedName)
-		}
+	for w.reconcileOnce() {
+	}
+}
+
+func (w *asyncWorker) reconcileOnce() bool {
+	obj, quit := w.queue.Get()
+	if quit {
+		return false
+	}
+	defer w.queue.Done(obj)
+
+	qualifiedName, ok := obj.(QualifiedName)
+	if !ok {
+		return true
+	}
+
+	status := w.reconcile(qualifiedName)
+	switch status {
+	case StatusAllOK:
+		break
+	case StatusError:
+		w.EnqueueForError(qualifiedName)
+	case StatusNeedsRecheck:
+		w.EnqueueForRetry(qualifiedName)
+	case StatusNotSynced:
+		w.EnqueueForClusterSync(qualifiedName)
 	}
+	return true
}
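The worker now takes a name (used for the named workqueue, which gives it per-queue metrics) and a concurrency knob, and Run fans out one dequeue loop per MaxConcurrentReconciles. A construction sketch under the new signature; the reconcile body is illustrative, and the import path assumes the kubefed module rather than this vendored copy.

```go
package main

import (
	"sigs.k8s.io/kubefed/pkg/controller/util"
)

func startWorker(stopChan <-chan struct{}) {
	reconcile := func(qualifiedName util.QualifiedName) util.ReconciliationStatus {
		// A real controller reconciles the named resource here and returns
		// StatusError/StatusNeedsRecheck/StatusNotSynced to get requeued.
		return util.StatusAllOK
	}
	worker := util.NewReconcileWorker("sync", reconcile, util.WorkerOptions{
		// e.g. wired from ControllerConfig.MaxConcurrentSyncReconciles.
		MaxConcurrentReconciles: 4,
	})
	worker.Run(stopChan)
}

func main() {
	stopChan := make(chan struct{})
	startWorker(stopChan)
	close(stopChan)
}
```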