Upgrade k8s package version (#5358)

* upgrade k8s package version
Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

* Script upgrade and code formatting.
Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
vendor/k8s.io/client-go/tools/cache/OWNERS (60 changes, generated, vendored)

@@ -1,40 +1,28 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- caesarxuchao
-- liggitt
-- ncdc
+  - thockin
+  - lavalamp
+  - smarterclayton
+  - wojtek-t
+  - deads2k
+  - caesarxuchao
+  - liggitt
+  - ncdc
 reviewers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- brendandburns
-- derekwaynecarr
-- caesarxuchao
-- mikedanese
-- liggitt
-- erictune
-- davidopp
-- pmorie
-- janetkuo
-- justinsb
-- soltysh
-- jsafrane
-- dims
-- hongchaodeng
-- krousey
-- xiang90
-- mml
-- ingvagabund
-- resouer
-- jessfraz
-- mfojtik
-- sdminonne
-- ncdc
+  - thockin
+  - lavalamp
+  - smarterclayton
+  - wojtek-t
+  - deads2k
+  - derekwaynecarr
+  - caesarxuchao
+  - mikedanese
+  - liggitt
+  - janetkuo
+  - justinsb
+  - soltysh
+  - jsafrane
+  - dims
+  - ingvagabund
+  - ncdc
vendor/k8s.io/client-go/tools/cache/controller.go (207 changes, generated, vendored)

@@ -17,13 +17,14 @@ limitations under the License.
 package cache
 
 import (
+    "errors"
     "sync"
     "time"
 
     "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/utils/clock"
 )
 
 // This file implements a low-level controller that is used in
@@ -198,17 +199,17 @@ func (c *controller) processLoop() {
 // can't return an error. The handlers MUST NOT modify the objects
 // received; this concerns not only the top level of structure but all
 // the data structures reachable from it.
-//  * OnAdd is called when an object is added.
-//  * OnUpdate is called when an object is modified. Note that oldObj is the
-//      last known state of the object-- it is possible that several changes
-//      were combined together, so you can't use this to see every single
-//      change. OnUpdate is also called when a re-list happens, and it will
-//      get called even if nothing changed. This is useful for periodically
-//      evaluating or syncing something.
-//  * OnDelete will get the final state of the item if it is known, otherwise
-//      it will get an object of type DeletedFinalStateUnknown. This can
-//      happen if the watch is closed and misses the delete event and we don't
-//      notice the deletion until the subsequent re-list.
+//   - OnAdd is called when an object is added.
+//   - OnUpdate is called when an object is modified. Note that oldObj is the
+//     last known state of the object-- it is possible that several changes
+//     were combined together, so you can't use this to see every single
+//     change. OnUpdate is also called when a re-list happens, and it will
+//     get called even if nothing changed. This is useful for periodically
+//     evaluating or syncing something.
+//   - OnDelete will get the final state of the item if it is known, otherwise
+//     it will get an object of type DeletedFinalStateUnknown. This can
+//     happen if the watch is closed and misses the delete event and we don't
+//     notice the deletion until the subsequent re-list.
 type ResourceEventHandler interface {
     OnAdd(obj interface{})
     OnUpdate(oldObj, newObj interface{})
@@ -304,15 +305,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
 // notifications to be faulty.
 //
 // Parameters:
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
 func NewInformer(
     lw ListerWatcher,
     objType runtime.Object,
@@ -322,25 +322,24 @@ func NewInformer(
     // This will hold the client state, as we know it.
     clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 
-    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }
 
-// NewIndexerInformer returns a Indexer and a controller for populating the index
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
 // while also providing event notifications. You should only used the returned
 // Index for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
 //
 // Parameters:
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//  * indexers is the indexer for the received object type.
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+//   - indexers is the indexer for the received object type.
 func NewIndexerInformer(
     lw ListerWatcher,
     objType runtime.Object,
@@ -351,29 +350,124 @@ func NewIndexerInformer(
     // This will hold the client state, as we know it.
     clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 
-    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }
 
+// TransformFunc allows for transforming an object before it will be processed
+// and put into the controller cache and before the corresponding handlers will
+// be called on it.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown
+//
+// The most common usage pattern is to clean-up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+// given controller doesn't care for them
+type TransformFunc func(interface{}) (interface{}, error)
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only used
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they will
+// put into the Store and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingInformer(
+    lw ListerWatcher,
+    objType runtime.Object,
+    resyncPeriod time.Duration,
+    h ResourceEventHandler,
+    transformer TransformFunc,
+) (Store, Controller) {
+    // This will hold the client state, as we know it.
+    clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only used the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they will
+// be put into the Index and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingIndexerInformer(
+    lw ListerWatcher,
+    objType runtime.Object,
+    resyncPeriod time.Duration,
+    h ResourceEventHandler,
+    indexers Indexers,
+    transformer TransformFunc,
+) (Indexer, Controller) {
+    // This will hold the client state, as we know it.
+    clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// Multiplexes updates in the form of a list of Deltas into a Store, and informs
+// a given handler of events OnUpdate, OnAdd, OnDelete
+func processDeltas(
+    // Object which receives event notifications from the given deltas
+    handler ResourceEventHandler,
+    clientState Store,
+    transformer TransformFunc,
+    deltas Deltas,
+) error {
+    // from oldest to newest
+    for _, d := range deltas {
+        obj := d.Object
+        if transformer != nil {
+            var err error
+            obj, err = transformer(obj)
+            if err != nil {
+                return err
+            }
+        }
+
+        switch d.Type {
+        case Sync, Replaced, Added, Updated:
+            if old, exists, err := clientState.Get(obj); err == nil && exists {
+                if err := clientState.Update(obj); err != nil {
+                    return err
+                }
+                handler.OnUpdate(old, obj)
+            } else {
+                if err := clientState.Add(obj); err != nil {
+                    return err
+                }
+                handler.OnAdd(obj)
+            }
+        case Deleted:
+            if err := clientState.Delete(obj); err != nil {
+                return err
+            }
+            handler.OnDelete(obj)
+        }
+    }
+    return nil
+}
+
 // newInformer returns a controller for populating the store while also
 // providing event notifications.
 //
 // Parameters
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//  * clientState is the store you want to populate
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+//   - clientState is the store you want to populate
 func newInformer(
     lw ListerWatcher,
     objType runtime.Object,
     resyncPeriod time.Duration,
     h ResourceEventHandler,
     clientState Store,
+    transformer TransformFunc,
 ) Controller {
     // This will hold incoming changes. Note how we pass clientState in as a
     // KeyLister, that way resync operations will result in the correct set
@@ -391,29 +485,10 @@ func newInformer(
         RetryOnError: false,
 
         Process: func(obj interface{}) error {
-            // from oldest to newest
-            for _, d := range obj.(Deltas) {
-                switch d.Type {
-                case Sync, Replaced, Added, Updated:
-                    if old, exists, err := clientState.Get(d.Object); err == nil && exists {
-                        if err := clientState.Update(d.Object); err != nil {
-                            return err
-                        }
-                        h.OnUpdate(old, d.Object)
-                    } else {
-                        if err := clientState.Add(d.Object); err != nil {
-                            return err
-                        }
-                        h.OnAdd(d.Object)
-                    }
-                case Deleted:
-                    if err := clientState.Delete(d.Object); err != nil {
-                        return err
-                    }
-                    h.OnDelete(d.Object)
-                }
+            if deltas, ok := obj.(Deltas); ok {
+                return processDeltas(h, clientState, transformer, deltas)
             }
-            return nil
+            return errors.New("object given as Process argument is not Deltas")
         },
     }
     return New(cfg)
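For context, the TransformFunc hook introduced above lets callers shrink objects before they reach the cache. A minimal sketch, not part of this commit, of how the new constructor could be used; the stripManagedFields helper and the Pod type are illustrative:

package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

// stripManagedFields drops metadata.managedFields before objects are cached.
// A TransformFunc must also tolerate DeletedFinalStateUnknown tombstones,
// which this one passes through untouched.
func stripManagedFields(obj interface{}) (interface{}, error) {
    if pod, ok := obj.(*v1.Pod); ok {
        pod = pod.DeepCopy() // copy first: the object may be shared with other goroutines
        pod.ManagedFields = nil
        return pod, nil
    }
    return obj, nil
}

// newPodInformer wires the transform into the new constructor.
func newPodInformer(lw cache.ListerWatcher, h cache.ResourceEventHandler) (cache.Store, cache.Controller) {
    return cache.NewTransformingInformer(lw, &v1.Pod{}, 30*time.Second, h, stripManagedFields)
}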
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (80 changes, generated, vendored)

@@ -20,10 +20,12 @@ import (
     "errors"
     "fmt"
     "sync"
+    "time"
 
     "k8s.io/apimachinery/pkg/util/sets"
 
     "k8s.io/klog/v2"
+    utiltrace "k8s.io/utils/trace"
 )
 
 // DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are
@@ -72,11 +74,11 @@ type DeltaFIFOOptions struct {
 // the Pop() method.
 //
 // DeltaFIFO solves this use case:
-//  * You want to process every object change (delta) at most once.
-//  * When you process an object, you want to see everything
-//    that's happened to it since you last processed it.
-//  * You want to process the deletion of some of the objects.
-//  * You might want to periodically reprocess objects.
+//   - You want to process every object change (delta) at most once.
+//   - When you process an object, you want to see everything
+//     that's happened to it since you last processed it.
+//   - You want to process the deletion of some of the objects.
+//   - You might want to periodically reprocess objects.
 //
 // DeltaFIFO's Pop(), Get(), and GetByKey() methods return
 // interface{} to satisfy the Store/Queue interfaces, but they
@@ -121,7 +123,7 @@ type DeltaFIFO struct {
     knownObjects KeyListerGetter
 
     // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
-    // Currently, not used to gate any of CRED operations.
+    // Currently, not used to gate any of CRUD operations.
     closed bool
 
     // emitDeltaTypeReplaced is whether to emit the Replaced or Sync
@@ -153,7 +155,7 @@ const (
 // change happened, and the object's state after* that change.
 //
 // [*] Unless the change is a deletion, and then you'll get the final
-//     state of the object before it was deleted.
+// state of the object before it was deleted.
 type Delta struct {
     Type   DeltaType
     Object interface{}
@@ -174,24 +176,24 @@ type Deltas []Delta
 // modifications.
 //
 // TODO: consider merging keyLister with this object, tracking a list of
-//       "known" keys when Pop() is called. Have to think about how that
-//       affects error retrying.
-//       NOTE: It is possible to misuse this and cause a race when using an
-//       external known object source.
-//       Whether there is a potential race depends on how the consumer
-//       modifies knownObjects. In Pop(), process function is called under
-//       lock, so it is safe to update data structures in it that need to be
-//       in sync with the queue (e.g. knownObjects).
+// "known" keys when Pop() is called. Have to think about how that
+// affects error retrying.
 //
-//       Example:
-//       In case of sharedIndexInformer being a consumer
-//       (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
-//       src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
-//       there is no race as knownObjects (s.indexer) is modified safely
-//       under DeltaFIFO's lock. The only exceptions are GetStore() and
-//       GetIndexer() methods, which expose ways to modify the underlying
-//       storage. Currently these two methods are used for creating Lister
-//       and internal tests.
+// NOTE: It is possible to misuse this and cause a race when using an
+// external known object source.
+// Whether there is a potential race depends on how the consumer
+// modifies knownObjects. In Pop(), process function is called under
+// lock, so it is safe to update data structures in it that need to be
+// in sync with the queue (e.g. knownObjects).
+//
+// Example:
+// In case of sharedIndexInformer being a consumer
+// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
+// there is no race as knownObjects (s.indexer) is modified safely
+// under DeltaFIFO's lock. The only exceptions are GetStore() and
+// GetIndexer() methods, which expose ways to modify the underlying
+// storage. Currently these two methods are used for creating Lister
+// and internal tests.
 //
 // Also see the comment on DeltaFIFO.
 //
@@ -340,7 +342,7 @@ func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
     if !ok {
         return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
     }
-    id, err := f.KeyOf(deltas.Newest().Object)
+    id, err := f.KeyOf(deltas)
     if err != nil {
         return KeyError{obj, err}
     }
@@ -373,13 +375,8 @@ func dedupDeltas(deltas Deltas) Deltas {
     a := &deltas[n-1]
     b := &deltas[n-2]
     if out := isDup(a, b); out != nil {
-        // `a` and `b` are duplicates. Only keep the one returned from isDup().
-        // TODO: This extra array allocation and copy seems unnecessary if
-        // all we do to dedup is compare the new delta with the last element
-        // in `items`, which could be done by mutating `items` directly.
-        // Might be worth profiling and investigating if it is safe to optimize.
-        d := append(Deltas{}, deltas[:n-2]...)
-        return append(d, *out)
+        deltas[n-2] = *out
+        return deltas[:n-1]
     }
     return deltas
 }
@@ -461,8 +458,8 @@ func (f *DeltaFIFO) listLocked() []interface{} {
 func (f *DeltaFIFO) ListKeys() []string {
     f.lock.RLock()
     defer f.lock.RUnlock()
-    list := make([]string, 0, len(f.items))
-    for key := range f.items {
+    list := make([]string, 0, len(f.queue))
+    for _, key := range f.queue {
         list = append(list, key)
     }
     return list
@@ -531,6 +528,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
         }
         id := f.queue[0]
         f.queue = f.queue[1:]
+        depth := len(f.queue)
         if f.initialPopulationCount > 0 {
             f.initialPopulationCount--
         }
@@ -541,6 +539,18 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
             continue
         }
         delete(f.items, id)
+        // Only log traces if the queue depth is greater than 10 and it takes more than
+        // 100 milliseconds to process one item from the queue.
+        // Queue depth never goes high because processing an item is locking the queue,
+        // and new items can't be added until processing finish.
+        // https://github.com/kubernetes/kubernetes/issues/103789
+        if depth > 10 {
+            trace := utiltrace.New("DeltaFIFO Pop Process",
+                utiltrace.Field{Key: "ID", Value: id},
+                utiltrace.Field{Key: "Depth", Value: depth},
+                utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+            defer trace.LogIfLong(100 * time.Millisecond)
+        }
         err := process(item)
         if e, ok := err.(ErrRequeue); ok {
             f.addIfNotPresent(id, item)
@@ -562,7 +572,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // of the Deltas associated with K. Otherwise the pre-existing keys
 // are those listed by `f.knownObjects` and the current object of K is
 // what `f.knownObjects.GetByKey(K)` returns.
-func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
     f.lock.Lock()
     defer f.lock.Unlock()
     keys := make(sets.String, len(list))
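For context: the Pop changes above (queue-ordered ListKeys, depth tracing, in-place dedup) all serve the consumer-loop pattern that DeltaFIFO's doc comment describes. A minimal sketch of such a consumer, assuming the FIFO is filled by a Reflector elsewhere:

package example

import "k8s.io/client-go/tools/cache"

// drain pops the accumulated Deltas for one key at a time, oldest to newest.
func drain(fifo *cache.DeltaFIFO, stopCh <-chan struct{}) {
    for {
        select {
        case <-stopCh:
            return
        default:
        }
        if _, err := fifo.Pop(func(obj interface{}) error {
            for _, d := range obj.(cache.Deltas) {
                switch d.Type {
                case cache.Sync, cache.Replaced, cache.Added, cache.Updated:
                    // handle create/update of d.Object
                case cache.Deleted:
                    // d.Object may be a cache.DeletedFinalStateUnknown tombstone
                }
            }
            return nil
        }); err != nil {
            return // queue was closed
        }
    }
}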
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (17 changes, generated, vendored)

@@ -20,18 +20,19 @@ import (
     "sync"
     "time"
 
-    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/klog/v2"
+    "k8s.io/utils/clock"
 )
 
 // ExpirationCache implements the store interface
-//	1. All entries are automatically time stamped on insert
-//		a. The key is computed based off the original item/keyFunc
-//		b. The value inserted under that key is the timestamped item
-//	2. Expiration happens lazily on read based on the expiration policy
-//		a. No item can be inserted into the store while we're expiring
-//		   *any* item in the cache.
-//	3. Time-stamps are stripped off unexpired entries before return
+//  1. All entries are automatically time stamped on insert
+//     a. The key is computed based off the original item/keyFunc
+//     b. The value inserted under that key is the timestamped item
+//  2. Expiration happens lazily on read based on the expiration policy
+//     a. No item can be inserted into the store while we're expiring
+//     *any* item in the cache.
+//  3. Time-stamps are stripped off unexpired entries before return
+//
 // Note that the ExpirationCache is inherently slower than a normal
 // threadSafeStore because it takes a write lock every time it checks if
 // an item has expired.
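For context: the numbered list above documents the lazy-TTL behavior that this same file exposes through NewTTLStore. A minimal sketch, assuming pod is some object with standard ObjectMeta:

package example

import (
    "time"

    "k8s.io/client-go/tools/cache"
)

func ttlExample(pod interface{}) {
    // Entries are timestamped on insert and lazily expire on read.
    store := cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 5*time.Minute)
    _ = store.Add(pod)
    // After the TTL elapses, GetByKey reports the entry as absent.
    _, exists, _ := store.GetByKey("default/my-pod")
    _ = exists
}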
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (2 changes, generated, vendored)

@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 
 import (
-    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/utils/clock"
 )
 
 type fakeThreadSafeMap struct {
vendor/k8s.io/client-go/tools/cache/fifo.go (16 changes, generated, vendored)

@@ -103,10 +103,11 @@ func Pop(queue Queue) interface{} {
 // recent version will be processed. This can't be done with a channel
 //
 // FIFO solves this use case:
-//  * You want to process every object (exactly) once.
-//  * You want to process the most recent version of the object when you process it.
-//  * You do not want to process deleted objects, they should be removed from the queue.
-//  * You do not want to periodically reprocess objects.
+//   - You want to process every object (exactly) once.
+//   - You want to process the most recent version of the object when you process it.
+//   - You do not want to process deleted objects, they should be removed from the queue.
+//   - You do not want to periodically reprocess objects.
+//
 // Compare with DeltaFIFO for other use cases.
 type FIFO struct {
     lock sync.RWMutex
@@ -127,7 +128,7 @@ type FIFO struct {
 
     // Indication the queue is closed.
     // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
-    // Currently, not used to gate any of CRED operations.
+    // Currently, not used to gate any of CRUD operations.
     closed bool
 }
 
@@ -263,10 +264,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
 func (f *FIFO) IsClosed() bool {
     f.lock.Lock()
     defer f.lock.Unlock()
-    if f.closed {
-        return true
-    }
-    return false
+    return f.closed
 }
 
 // Pop waits until an item is ready and processes it. If multiple items are
vendor/k8s.io/client-go/tools/cache/heap.go (5 changes, generated, vendored)

@@ -304,10 +304,7 @@ func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
 func (h *Heap) IsClosed() bool {
     h.lock.RLock()
     defer h.lock.RUnlock()
-    if h.closed {
-        return true
-    }
-    return false
+    return h.closed
 }
 
 // NewHeap returns a Heap which can be used to queue up items to process.
vendor/k8s.io/client-go/tools/cache/index.go (14 changes, generated, vendored)

@@ -28,10 +28,10 @@ import (
 // Delete).
 //
 // There are three kinds of strings here:
-//  1. a storage key, as defined in the Store interface,
-//  2. a name of an index, and
-//  3. an "indexed value", which is produced by an IndexFunc and
-//      can be a field value or any other string computed from the object.
+//  1. a storage key, as defined in the Store interface,
+//  2. a name of an index, and
+//  3. an "indexed value", which is produced by an IndexFunc and
+//     can be a field value or any other string computed from the object.
 type Indexer interface {
     Store
     // Index returns the stored objects whose set of indexed values
@@ -47,7 +47,7 @@ type Indexer interface {
     // ByIndex returns the stored objects whose set of indexed values
     // for the named index includes the given indexed value
     ByIndex(indexName, indexedValue string) ([]interface{}, error)
-    // GetIndexer return the indexers
+    // GetIndexers return the indexers
     GetIndexers() Indexers
 
     // AddIndexers adds more indexers to this store. If you call this after you already have data
@@ -78,7 +78,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
 }
 
 const (
-    // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field.
+    // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field.
     NamespaceIndex string = "namespace"
 )
 
@@ -94,7 +94,7 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
 // Index maps the indexed value to a set of keys in the store that match on that value
 type Index map[string]sets.String
 
-// Indexers maps a name to a IndexFunc
+// Indexers maps a name to an IndexFunc
 type Indexers map[string]IndexFunc
 
 // Indices maps a name to an Index
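For context, the Indexer vocabulary whose comments were cleaned up above (Indexers maps an index name to an IndexFunc) in a minimal sketch; the namespace lookup uses the NamespaceIndex constant from this file:

package example

import "k8s.io/client-go/tools/cache"

func byNamespace(objs ...interface{}) ([]interface{}, error) {
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
        cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, // indexed value is the namespace
    })
    for _, o := range objs {
        if err := indexer.Add(o); err != nil {
            return nil, err
        }
    }
    // Return every stored object whose namespace index value is "kube-system".
    return indexer.ByIndex(cache.NamespaceIndex, "kube-system")
}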
vendor/k8s.io/client-go/tools/cache/mutation_detector.go (2 changes, generated, vendored)

@@ -99,7 +99,7 @@ func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
     for {
         if d.lastRotated.IsZero() {
             d.lastRotated = time.Now()
-        } else if time.Now().Sub(d.lastRotated) > d.retainDuration {
+        } else if time.Since(d.lastRotated) > d.retainDuration {
             d.retainedCachedObjs = d.cachedObjs
             d.cachedObjs = nil
             d.lastRotated = time.Now()
vendor/k8s.io/client-go/tools/cache/reflector.go (301 changes, generated, vendored)

@@ -32,7 +32,6 @@ import (
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/naming"
     utilnet "k8s.io/apimachinery/pkg/util/net"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -40,6 +39,7 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/tools/pager"
     "k8s.io/klog/v2"
+    "k8s.io/utils/clock"
     "k8s.io/utils/trace"
 )
 
@@ -69,8 +69,10 @@ type Reflector struct {
 
     // backoff manages backoff of ListWatch
     backoffManager wait.BackoffManager
-    // initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch.
+    // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
     initConnBackoffManager wait.BackoffManager
+    // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
+    MaxInternalErrorRetryDuration time.Duration
 
     resyncPeriod time.Duration
     // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
@@ -231,7 +233,7 @@ var (
 
     // Used to indicate that watching stopped because of a signal from the stop
     // channel passed in from a client of the reflector.
-    errorStopRequested = errors.New("Stop requested")
+    errorStopRequested = errors.New("stop requested")
 )
 
 // resyncChan returns a channel which will receive something when a resync is
@@ -253,111 +255,9 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 // It returns error if ListAndWatch didn't even try to initialize watch.
 func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
     klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
-    var resourceVersion string
-
-    options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
-
-    if err := func() error {
-        initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
-        defer initTrace.LogIfLong(10 * time.Second)
-        var list runtime.Object
-        var paginatedResult bool
-        var err error
-        listCh := make(chan struct{}, 1)
-        panicCh := make(chan interface{}, 1)
-        go func() {
-            defer func() {
-                if r := recover(); r != nil {
-                    panicCh <- r
-                }
-            }()
-            // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
-            // list request will return the full response.
-            pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
-                return r.listerWatcher.List(opts)
-            }))
-            switch {
-            case r.WatchListPageSize != 0:
-                pager.PageSize = r.WatchListPageSize
-            case r.paginatedResult:
-                // We got a paginated result initially. Assume this resource and server honor
-                // paging requests (i.e. watch cache is probably disabled) and leave the default
-                // pager size set.
-            case options.ResourceVersion != "" && options.ResourceVersion != "0":
-                // User didn't explicitly request pagination.
-                //
-                // With ResourceVersion != "", we have a possibility to list from watch cache,
-                // but we do that (for ResourceVersion != "0") only if Limit is unset.
-                // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
-                // switch off pagination to force listing from watch cache (if enabled).
-                // With the existing semantic of RV (result is at least as fresh as provided RV),
-                // this is correct and doesn't lead to going back in time.
-                //
-                // We also don't turn off pagination for ResourceVersion="0", since watch cache
-                // is ignoring Limit in that case anyway, and if watch cache is not enabled
-                // we don't introduce regression.
-                pager.PageSize = 0
-            }
-
-            list, paginatedResult, err = pager.List(context.Background(), options)
-            if isExpiredError(err) || isTooLargeResourceVersionError(err) {
-                r.setIsLastSyncResourceVersionUnavailable(true)
-                // Retry immediately if the resource version used to list is unavailable.
-                // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
-                // continuation pages, but the pager might not be enabled, the full list might fail because the
-                // resource version it is listing at is expired or the cache may not yet be synced to the provided
-                // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
-                // the reflector makes forward progress.
-                list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
-            }
-            close(listCh)
-        }()
-        select {
-        case <-stopCh:
-            return nil
-        case r := <-panicCh:
-            panic(r)
-        case <-listCh:
-        }
-        if err != nil {
-            return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
-        }
-
-        // We check if the list was paginated and if so set the paginatedResult based on that.
-        // However, we want to do that only for the initial list (which is the only case
-        // when we set ResourceVersion="0"). The reasoning behind it is that later, in some
-        // situations we may force listing directly from etcd (by setting ResourceVersion="")
-        // which will return paginated result, even if watch cache is enabled. However, in
-        // that case, we still want to prefer sending requests to watch cache if possible.
-        //
-        // Paginated result returned for request with ResourceVersion="0" mean that watch
-        // cache is disabled and there are a lot of objects of a given type. In such case,
-        // there is no need to prefer listing from watch cache.
-        if options.ResourceVersion == "0" && paginatedResult {
-            r.paginatedResult = true
-        }
-
-        r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
-        initTrace.Step("Objects listed")
-        listMetaInterface, err := meta.ListAccessor(list)
-        if err != nil {
-            return fmt.Errorf("unable to understand list result %#v: %v", list, err)
-        }
-        resourceVersion = listMetaInterface.GetResourceVersion()
-        initTrace.Step("Resource version extracted")
-        items, err := meta.ExtractList(list)
-        if err != nil {
-            return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
-        }
-        initTrace.Step("Objects extracted")
-        if err := r.syncWith(items, resourceVersion); err != nil {
-            return fmt.Errorf("unable to sync list result: %v", err)
-        }
-        initTrace.Step("SyncWith done")
-        r.setLastSyncResourceVersion(resourceVersion)
-        initTrace.Step("Resource version updated")
-        return nil
-    }(); err != nil {
+    err := r.list(stopCh)
+    if err != nil {
         return err
     }
 
@@ -389,6 +289,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
         }
     }()
 
+    retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
     for {
         // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
         select {
@@ -398,9 +299,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
         }
 
         timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
-        options = metav1.ListOptions{
-            ResourceVersion: resourceVersion,
-            // We want to avoid situations of hanging watchers. Stop any wachers that do not
+        options := metav1.ListOptions{
+            ResourceVersion: r.LastSyncResourceVersion(),
+            // We want to avoid situations of hanging watchers. Stop any watchers that do not
             // receive any events within the timeout window.
             TimeoutSeconds: &timeoutSeconds,
             // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
@@ -417,14 +318,17 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             // It doesn't make sense to re-list all objects because most likely we will be able to restart
             // watch where we ended.
             // If that's the case begin exponentially backing off and resend watch request.
-            if utilnet.IsConnectionRefused(err) {
+            // Do the same for "429" errors.
+            if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
                 <-r.initConnBackoffManager.Backoff().C()
                 continue
             }
             return err
         }
 
-        if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
+        err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
+        retry.After(err)
+        if err != nil {
             if err != errorStopRequested {
                 switch {
                 case isExpiredError(err):
@@ -432,6 +336,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
                     // has a semantic that it returns data at least as fresh as provided RV.
                     // So first try to LIST with setting RV to resource version of last observed object.
                     klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+                case apierrors.IsTooManyRequests(err):
+                    klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
+                    <-r.initConnBackoffManager.Backoff().C()
+                    continue
+                case apierrors.IsInternalError(err) && retry.ShouldRetry():
+                    klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
+                    continue
                 default:
                     klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
                 }
@@ -441,6 +352,114 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
     }
 }
 
+// list simply lists all items and records a resource version obtained from the server at the moment of the call.
+// the resource version can be used for further progress notification (aka. watch).
+func (r *Reflector) list(stopCh <-chan struct{}) error {
+    var resourceVersion string
+    options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
+
+    initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
+    defer initTrace.LogIfLong(10 * time.Second)
+    var list runtime.Object
+    var paginatedResult bool
+    var err error
+    listCh := make(chan struct{}, 1)
+    panicCh := make(chan interface{}, 1)
+    go func() {
+        defer func() {
+            if r := recover(); r != nil {
+                panicCh <- r
+            }
+        }()
+        // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
+        // list request will return the full response.
+        pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+            return r.listerWatcher.List(opts)
+        }))
+        switch {
+        case r.WatchListPageSize != 0:
+            pager.PageSize = r.WatchListPageSize
+        case r.paginatedResult:
+            // We got a paginated result initially. Assume this resource and server honor
+            // paging requests (i.e. watch cache is probably disabled) and leave the default
+            // pager size set.
+        case options.ResourceVersion != "" && options.ResourceVersion != "0":
+            // User didn't explicitly request pagination.
+            //
+            // With ResourceVersion != "", we have a possibility to list from watch cache,
+            // but we do that (for ResourceVersion != "0") only if Limit is unset.
+            // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
+            // switch off pagination to force listing from watch cache (if enabled).
+            // With the existing semantic of RV (result is at least as fresh as provided RV),
+            // this is correct and doesn't lead to going back in time.
+            //
+            // We also don't turn off pagination for ResourceVersion="0", since watch cache
+            // is ignoring Limit in that case anyway, and if watch cache is not enabled
+            // we don't introduce regression.
+            pager.PageSize = 0
+        }
+
+        list, paginatedResult, err = pager.List(context.Background(), options)
+        if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+            r.setIsLastSyncResourceVersionUnavailable(true)
+            // Retry immediately if the resource version used to list is unavailable.
+            // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
+            // continuation pages, but the pager might not be enabled, the full list might fail because the
+            // resource version it is listing at is expired or the cache may not yet be synced to the provided
+            // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
+            // the reflector makes forward progress.
+            list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+        }
+        close(listCh)
+    }()
+    select {
+    case <-stopCh:
+        return nil
+    case r := <-panicCh:
+        panic(r)
+    case <-listCh:
+    }
+    initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
+    if err != nil {
+        klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
+        return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
+    }
+
+    // We check if the list was paginated and if so set the paginatedResult based on that.
+    // However, we want to do that only for the initial list (which is the only case
+    // when we set ResourceVersion="0"). The reasoning behind it is that later, in some
+    // situations we may force listing directly from etcd (by setting ResourceVersion="")
+    // which will return paginated result, even if watch cache is enabled. However, in
+    // that case, we still want to prefer sending requests to watch cache if possible.
+    //
+    // Paginated result returned for request with ResourceVersion="0" mean that watch
+    // cache is disabled and there are a lot of objects of a given type. In such case,
+    // there is no need to prefer listing from watch cache.
+    if options.ResourceVersion == "0" && paginatedResult {
+        r.paginatedResult = true
+    }
+
+    r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
+    listMetaInterface, err := meta.ListAccessor(list)
+    if err != nil {
+        return fmt.Errorf("unable to understand list result %#v: %v", list, err)
+    }
+    resourceVersion = listMetaInterface.GetResourceVersion()
+    initTrace.Step("Resource version extracted")
+    items, err := meta.ExtractList(list)
+    if err != nil {
+        return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
+    }
+    initTrace.Step("Objects extracted")
+    if err := r.syncWith(items, resourceVersion); err != nil {
+        return fmt.Errorf("unable to sync list result: %v", err)
+    }
+    initTrace.Step("SyncWith done")
+    r.setLastSyncResourceVersion(resourceVersion)
+    initTrace.Step("Resource version updated")
+    return nil
+}
+
 // syncWith replaces the store's items with the given list.
 func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
     found := make([]interface{}, 0, len(items))
@@ -450,8 +469,19 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
     return r.store.Replace(found, resourceVersion)
 }
 
-// watchHandler watches w and keeps *resourceVersion up to date.
-func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
+// watchHandler watches w and sets setLastSyncResourceVersion
+func watchHandler(start time.Time,
+    w watch.Interface,
+    store Store,
+    expectedType reflect.Type,
+    expectedGVK *schema.GroupVersionKind,
+    name string,
+    expectedTypeName string,
+    setLastSyncResourceVersion func(string),
+    clock clock.Clock,
+    errc chan error,
+    stopCh <-chan struct{},
+) error {
     eventCount := 0
 
     // Stopping the watcher should be idempotent and if we return from this function there's no way
@@ -472,62 +502,61 @@ loop:
             if event.Type == watch.Error {
                 return apierrors.FromObject(event.Object)
             }
-            if r.expectedType != nil {
-                if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
-                    utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
+            if expectedType != nil {
+                if e, a := expectedType, reflect.TypeOf(event.Object); e != a {
+                    utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a))
                     continue
                 }
             }
-            if r.expectedGVK != nil {
-                if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
-                    utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a))
+            if expectedGVK != nil {
+                if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
+                    utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a))
                     continue
                 }
             }
             meta, err := meta.Accessor(event.Object)
             if err != nil {
-                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
                 continue
             }
-            newResourceVersion := meta.GetResourceVersion()
+            resourceVersion := meta.GetResourceVersion()
             switch event.Type {
             case watch.Added:
-                err := r.store.Add(event.Object)
+                err := store.Add(event.Object)
                 if err != nil {
-                    utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
+                    utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err))
                 }
             case watch.Modified:
-                err := r.store.Update(event.Object)
+                err := store.Update(event.Object)
                 if err != nil {
-                    utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
+                    utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err))
                 }
             case watch.Deleted:
                 // TODO: Will any consumers need access to the "last known
                 // state", which is passed in event.Object? If so, may need
                 // to change this.
-                err := r.store.Delete(event.Object)
+                err := store.Delete(event.Object)
                 if err != nil {
-                    utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
+                    utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err))
                 }
             case watch.Bookmark:
                 // A `Bookmark` means watch has synced here, just update the resourceVersion
             default:
-                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
             }
-            *resourceVersion = newResourceVersion
-            r.setLastSyncResourceVersion(newResourceVersion)
-            if rvu, ok := r.store.(ResourceVersionUpdater); ok {
-                rvu.UpdateResourceVersion(newResourceVersion)
+            setLastSyncResourceVersion(resourceVersion)
+            if rvu, ok := store.(ResourceVersionUpdater); ok {
+                rvu.UpdateResourceVersion(resourceVersion)
             }
             eventCount++
         }
     }
 
-    watchDuration := r.clock.Since(start)
+    watchDuration := clock.Since(start)
     if watchDuration < 1*time.Second && eventCount == 0 {
-        return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
+        return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
     }
-    klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount)
+    klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount)
     return nil
 }
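For context: the new MaxInternalErrorRetryDuration field and the 429 handling above change how a standalone Reflector rides out server-side errors. A minimal sketch of opting in, assuming lw, store, and stopCh are provided by the caller:

package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

func runReflector(lw cache.ListerWatcher, store cache.Store, stopCh <-chan struct{}) {
    r := cache.NewReflector(lw, &v1.Pod{}, store, 0)
    // Keep retrying watches that fail with internal errors for up to 30s,
    // governed by the NewRetryWithDeadline policy added in this commit.
    r.MaxInternalErrorRetryDuration = 30 * time.Second
    r.Run(stopCh) // blocks until stopCh closes
}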
vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go (78 changes, generated, vendored, new file)

@@ -0,0 +1,78 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+    "k8s.io/utils/clock"
+    "time"
+)
+
+type RetryWithDeadline interface {
+    After(error)
+    ShouldRetry() bool
+}
+
+type retryWithDeadlineImpl struct {
+    firstErrorTime   time.Time
+    lastErrorTime    time.Time
+    maxRetryDuration time.Duration
+    minResetPeriod   time.Duration
+    isRetryable      func(error) bool
+    clock            clock.Clock
+}
+
+func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline {
+    return &retryWithDeadlineImpl{
+        firstErrorTime:   time.Time{},
+        lastErrorTime:    time.Time{},
+        maxRetryDuration: maxRetryDuration,
+        minResetPeriod:   minResetPeriod,
+        isRetryable:      isRetryable,
+        clock:            clock,
+    }
+}
+
+func (r *retryWithDeadlineImpl) reset() {
+    r.firstErrorTime = time.Time{}
+    r.lastErrorTime = time.Time{}
+}
+
+func (r *retryWithDeadlineImpl) After(err error) {
+    if r.isRetryable(err) {
+        if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod {
+            r.reset()
+        }
+
+        if r.firstErrorTime.IsZero() {
+            r.firstErrorTime = r.clock.Now()
+        }
+        r.lastErrorTime = r.clock.Now()
+    }
+}
+
+func (r *retryWithDeadlineImpl) ShouldRetry() bool {
+    if r.maxRetryDuration <= time.Duration(0) {
+        return false
+    }
+
+    if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration {
+        return true
+    }
+
+    r.reset()
+    return false
+}
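For context, how this deadline-based retry behaves. A minimal sketch, assuming a real clock: retryable errors within maxRetryDuration of the first one keep ShouldRetry() true, and a quiet period of minResetPeriod resets the window. This mirrors how the Reflector itself uses it above:

package example

import (
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/client-go/tools/cache"
    "k8s.io/utils/clock"
)

func watchWithRetry(doWatch func() error) error {
    retry := cache.NewRetryWithDeadline(30*time.Second, time.Minute, apierrors.IsInternalError, clock.RealClock{})
    for {
        err := doWatch()
        retry.After(err) // record the error (or reset after a quiet period)
        if err != nil && apierrors.IsInternalError(err) && retry.ShouldRetry() {
            continue // still inside the retry window
        }
        return err
    }
}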
121
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
121
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
@@ -17,16 +17,17 @@ limitations under the License.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/utils/buffer"
|
||||
"k8s.io/utils/clock"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
@@ -180,6 +181,20 @@ type SharedInformer interface {
|
||||
// The handler should return quickly - any expensive processing should be
|
||||
// offloaded.
|
||||
SetWatchErrorHandler(handler WatchErrorHandler) error
|
||||
|
||||
// The TransformFunc is called for each object which is about to be stored.
|
||||
//
|
||||
// This function is intended for you to take the opportunity to
|
||||
// remove, transform, or normalize fields. One use case is to strip unused
|
||||
// metadata fields out of objects to save on RAM cost.
|
||||
//
|
||||
// Must be set before starting the informer.
|
||||
//
|
||||
// Note: Since the object given to the handler may be already shared with
|
||||
// other goroutines, it is advisable to copy the object being
|
||||
// transform before mutating it at all and returning the copy to prevent
|
||||
// data races.
|
||||
SetTransform(handler TransformFunc) error
|
||||
}
|
||||
|
||||
// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
|
||||
@@ -244,7 +259,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS
|
||||
return false
|
||||
}
|
||||
|
||||
klog.Infof("Caches are synced for %s ", controllerName)
|
||||
klog.Infof("Caches are synced for %s", controllerName)
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -318,6 +333,8 @@ type sharedIndexInformer struct {
|
||||
|
||||
// Called whenever the ListAndWatch drops the connection with an error.
|
||||
watchErrorHandler WatchErrorHandler
|
||||
|
||||
transform TransformFunc
|
||||
}
|
||||
|
||||
// dummyController hides the fact that a SharedInformer is different from a dedicated one
|
||||
@@ -365,9 +382,25 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) er
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error {
|
||||
s.startedLock.Lock()
|
||||
defer s.startedLock.Unlock()
|
||||
|
||||
if s.started {
|
||||
return fmt.Errorf("informer has already started")
|
||||
}
|
||||
|
||||
s.transform = handler
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
if s.HasStarted() {
|
||||
klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
|
||||
return
|
||||
}
|
||||
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
|
||||
KnownObjects: s.indexer,
|
||||
EmitDeltaTypeReplaced: true,
|
||||
@@ -410,6 +443,12 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
    s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) HasStarted() bool {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()
    return s.started
}

func (s *sharedIndexInformer) HasSynced() bool {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()
@@ -528,45 +567,47 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    // from oldest to newest
    for _, d := range obj.(Deltas) {
        switch d.Type {
        case Sync, Replaced, Added, Updated:
            s.cacheMutationDetector.AddObject(d.Object)
            if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
                if err := s.indexer.Update(d.Object); err != nil {
                    return err
                }
    if deltas, ok := obj.(Deltas); ok {
        return processDeltas(s, s.indexer, s.transform, deltas)
    }
    return errors.New("object given as Process argument is not Deltas")
}

                isSync := false
                switch {
                case d.Type == Sync:
                    // Sync events are only propagated to listeners that requested resync
                    isSync = true
                case d.Type == Replaced:
                    if accessor, err := meta.Accessor(d.Object); err == nil {
                        if oldAccessor, err := meta.Accessor(old); err == nil {
                            // Replaced events that didn't change resourceVersion are treated as resync events
                            // and only propagated to listeners that requested resync
                            isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
                        }
                    }
                }
                s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
            } else {
                if err := s.indexer.Add(d.Object); err != nil {
                    return err
                }
                s.processor.distribute(addNotification{newObj: d.Object}, false)
            }
        case Deleted:
            if err := s.indexer.Delete(d.Object); err != nil {
                return err
            }
            s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnAdd(obj interface{}) {
    // Invocation of this function is locked under s.blockDeltas, so it is
    // safe to distribute the notification
    s.cacheMutationDetector.AddObject(obj)
    s.processor.distribute(addNotification{newObj: obj}, false)
}

// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
    isSync := false

    // If this is a Sync event, isSync should be true.
    // If this is a Replaced event, isSync is true only if the resource version is unchanged:
    // a Replaced event whose RV did not change is handled like a Sync event.

    if accessor, err := meta.Accessor(new); err == nil {
        if oldAccessor, err := meta.Accessor(old); err == nil {
            // Events that didn't change resourceVersion are treated as resync events
            // and only propagated to listeners that requested resync
            isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
        }
    }
    return nil

    // Invocation of this function is locked under s.blockDeltas, so it is
    // safe to distribute the notification
    s.cacheMutationDetector.AddObject(new)
    s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
}

// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnDelete(old interface{}) {
    // Invocation of this function is locked under s.blockDeltas, so it is
    // safe to distribute the notification
    s.processor.distribute(deleteNotification{oldObj: old}, false)
}

// sharedProcessor has a collection of processorListener and can
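HandleDeltas now funnels each batch through the shared processDeltas helper (added in controller.go as part of this upgrade), which applies the informer's transform and then drives both the store and the OnAdd/OnUpdate/OnDelete methods above. Its shape is roughly the following sketch; see controller.go in this diff for the authoritative version:

func processDeltas(handler ResourceEventHandler, clientState Store, transformer TransformFunc, deltas Deltas) error {
    // from oldest to newest
    for _, d := range deltas {
        obj := d.Object
        if transformer != nil {
            var err error
            if obj, err = transformer(obj); err != nil {
                return err
            }
        }
        switch d.Type {
        case Sync, Replaced, Added, Updated:
            if old, exists, err := clientState.Get(obj); err == nil && exists {
                if err := clientState.Update(obj); err != nil {
                    return err
                }
                handler.OnUpdate(old, obj)
            } else {
                if err := clientState.Add(obj); err != nil {
                    return err
                }
                handler.OnAdd(obj)
            }
        case Deleted:
            if err := clientState.Delete(obj); err != nil {
                return err
            }
            handler.OnDelete(obj)
        }
    }
    return nil
}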
@@ -694,9 +735,9 @@ type processorListener struct {
    // full resync from the shared informer, but modified by two
    // adjustments. One is imposing a lower bound,
    // `minimumResyncPeriod`. The other is another lower bound, the
    // sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only
    // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only
    // in AddEventHandlerWithResyncPeriod invocations made after the
    // sharedProcessor starts and (b) only if the informer does
    // sharedIndexInformer starts and (b) only if the informer does
    // resyncs at all.
    requestedResyncPeriod time.Duration
    // resyncPeriod is the threshold that will be used in the logic

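The requestedResyncPeriod documented above originates from handler registration. A short sketch (the 30-second period is an arbitrary example):

package example

import (
    "time"

    "k8s.io/client-go/tools/cache"
)

func register(informer cache.SharedIndexInformer) {
    informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
        UpdateFunc: func(oldObj, newObj interface{}) {
            // Fires for real updates and for periodic resyncs; resyncs
            // arrive with oldObj and newObj at the same resourceVersion.
        },
    }, 30*time.Second)
}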
18
vendor/k8s.io/client-go/tools/cache/store.go
generated
vendored
@@ -85,6 +85,11 @@ func (k KeyError) Error() string {
    return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
}

// Unwrap implements errors.Unwrap
func (k KeyError) Unwrap() error {
    return k.Err
}

// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
// the object but not the object itself.
type ExplicitKey string
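With Unwrap in place, the standard errors helpers can reach the wrapped cause of a KeyError. A minimal sketch:

package example

import (
    "errors"
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func describeKeyFailure(err error) {
    var keyErr cache.KeyError
    if errors.As(err, &keyErr) {
        // KeyError still reports which object failed...
        fmt.Printf("no key for object %+v\n", keyErr.Obj)
        // ...and the new Unwrap exposes the underlying cause.
        fmt.Println("cause:", errors.Unwrap(keyErr))
    }
}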
@@ -194,8 +199,11 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error)
    return c.cacheStorage.Index(indexName, obj)
}

func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
    return c.cacheStorage.IndexKeys(indexName, indexKey)
// IndexKeys returns the storage keys of the stored objects whose set of
// indexed values for the named index includes the given indexed value.
// The returned keys are suitable to pass to GetByKey().
func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) {
    return c.cacheStorage.IndexKeys(indexName, indexedValue)
}

// ListIndexFuncValues returns the list of generated values of an Index func
@@ -203,8 +211,10 @@ func (c *cache) ListIndexFuncValues(indexName string) []string {
    return c.cacheStorage.ListIndexFuncValues(indexName)
}

func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
    return c.cacheStorage.ByIndex(indexName, indexKey)
// ByIndex returns the stored objects whose set of indexed values
// for the named index includes the given indexed value.
func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
    return c.cacheStorage.ByIndex(indexName, indexedValue)
}

func (c *cache) AddIndexers(newIndexers Indexers) error {
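The renames above clarify that these methods take an indexed value (for example, a namespace), not a store key. A usage sketch, assuming the standard namespace index is registered on the indexer:

package example

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

func podsInNamespace(indexer cache.Indexer, namespace string) ([]*corev1.Pod, error) {
    objs, err := indexer.ByIndex("namespace", namespace) // an indexed value, not a key
    if err != nil {
        return nil, err
    }
    pods := make([]*corev1.Pod, 0, len(objs))
    for _, obj := range objs {
        pod, ok := obj.(*corev1.Pod)
        if !ok {
            return nil, fmt.Errorf("unexpected type %T", obj)
        }
        pods = append(pods, pod)
    }
    return pods, nil
}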
100
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
generated
vendored
@@ -47,9 +47,9 @@ type ThreadSafeStore interface {
    ListKeys() []string
    Replace(map[string]interface{}, string)
    Index(indexName string, obj interface{}) ([]interface{}, error)
    IndexKeys(indexName, indexKey string) ([]string, error)
    IndexKeys(indexName, indexedValue string) ([]string, error)
    ListIndexFuncValues(name string) []string
    ByIndex(indexName, indexKey string) ([]interface{}, error)
    ByIndex(indexName, indexedValue string) ([]interface{}, error)
    GetIndexers() Indexers

    // AddIndexers adds more indexers to this store. If you call this after you already have data
@@ -71,11 +71,7 @@ type threadSafeMap struct {
}

func (c *threadSafeMap) Add(key string, obj interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    oldObject := c.items[key]
    c.items[key] = obj
    c.updateIndices(oldObject, obj, key)
    c.Update(key, obj)
}

func (c *threadSafeMap) Update(key string, obj interface{}) {
@@ -90,7 +86,7 @@ func (c *threadSafeMap) Delete(key string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    if obj, exists := c.items[key]; exists {
        c.deleteFromIndices(obj, key)
        c.updateIndices(obj, nil, key)
        delete(c.items, key)
    }
}
@@ -251,61 +247,73 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
    return nil
}

// updateIndices modifies the object's location in the managed indexes; if this is an update, you must provide an oldObj
// updateIndices modifies the object's location in the managed indexes:
//  - for create you must provide only the newObj
//  - for update you must provide both the oldObj and the newObj
//  - for delete you must provide only the oldObj
// updateIndices must be called from a function that already has a lock on the cache
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
    // if we got an old object, we need to remove it before we add it again
    if oldObj != nil {
        c.deleteFromIndices(oldObj, key)
    }
    var oldIndexValues, indexValues []string
    var err error
    for name, indexFunc := range c.indexers {
        indexValues, err := indexFunc(newObj)
        if oldObj != nil {
            oldIndexValues, err = indexFunc(oldObj)
        } else {
            oldIndexValues = oldIndexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        if newObj != nil {
            indexValues, err = indexFunc(newObj)
        } else {
            indexValues = indexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        index := c.indices[name]
        if index == nil {
            index = Index{}
            c.indices[name] = index
        }

        for _, indexValue := range indexValues {
            set := index[indexValue]
            if set == nil {
                set = sets.String{}
                index[indexValue] = set
            }
            set.Insert(key)
        if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
            // We optimize for the most common case where indexFunc returns a single value which has not been changed
            continue
        }

        for _, value := range oldIndexValues {
            c.deleteKeyFromIndex(key, value, index)
        }
        for _, value := range indexValues {
            c.addKeyToIndex(key, value, index)
        }
    }
}
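The single-value fast path above matters because most index functions emit exactly one value per object. For illustration, a namespace IndexFunc equivalent to this package's MetaNamespaceIndexFunc:

package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
)

// namespaceIndexFunc indexes each object under a single value, its
// namespace, so updates that keep the namespace take the optimized path.
func namespaceIndexFunc(obj interface{}) ([]string, error) {
    m, err := meta.Accessor(obj)
    if err != nil {
        return nil, fmt.Errorf("object has no meta: %v", err)
    }
    return []string{m.GetNamespace()}, nil
}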

// deleteFromIndices removes the object from each of the managed indexes
// it is intended to be called from a function that already has a lock on the cache
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
    for name, indexFunc := range c.indexers {
        indexValues, err := indexFunc(obj)
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }
func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        set = sets.String{}
        index[indexValue] = set
    }
    set.Insert(key)
}

        index := c.indices[name]
        if index == nil {
            continue
        }
        for _, indexValue := range indexValues {
            set := index[indexValue]
            if set != nil {
                set.Delete(key)

                // If we don't delete the set when zero, indices with high cardinality
                // short lived resources can cause memory to increase over time from
                // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
                if len(set) == 0 {
                    delete(index, indexValue)
                }
            }
        }
func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        return
    }
    set.Delete(key)
    // If we don't delete the set when zero, indices with high cardinality
    // short lived resources can cause memory to increase over time from
    // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
    if len(set) == 0 {
        delete(index, indexValue)
    }
}