use istio client-go library instead of knative (#1661)
* use istio client-go library instead of knative
* bump kubernetes dependency version
* change code coverage to codecov
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored): 3 changed lines
@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
 approvers:
 - thockin
 - lavalamp
@@ -22,7 +24,6 @@ reviewers:
 - erictune
 - davidopp
 - pmorie
-- kargakis
 - janetkuo
 - justinsb
 - eparis
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored): 77 changed lines
@@ -28,9 +28,9 @@ import (
 
 // Config contains all the settings for a Controller.
 type Config struct {
-    // The queue for your objects; either a FIFO or
-    // a DeltaFIFO. Your Process() function should accept
-    // the output of this Queue's Pop() method.
+    // The queue for your objects - has to be a DeltaFIFO due to
+    // assumptions in the implementation. Your Process() function
+    // should accept the output of this Queue's Pop() method.
     Queue
 
     // Something that can list and watch your objects.
@@ -79,6 +79,7 @@ type controller struct {
     clock clock.Clock
 }
 
+// Controller is a generic controller framework.
 type Controller interface {
     Run(stopCh <-chan struct{})
     HasSynced() bool
@@ -130,6 +131,8 @@ func (c *controller) HasSynced() bool {
 }
 
 func (c *controller) LastSyncResourceVersion() string {
+    c.reflectorMutex.RLock()
+    defer c.reflectorMutex.RUnlock()
     if c.reflector == nil {
         return ""
     }
@@ -149,7 +152,7 @@ func (c *controller) processLoop() {
     for {
         obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
         if err != nil {
-            if err == FIFOClosedError {
+            if err == ErrFIFOClosed {
                 return
             }
             if c.config.RetryOnError {
@@ -285,45 +288,7 @@ func NewInformer(
     // This will hold the client state, as we know it.
     clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 
-    // This will hold incoming changes. Note how we pass clientState in as a
-    // KeyLister, that way resync operations will result in the correct set
-    // of update/delete deltas.
-    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
-
-    cfg := &Config{
-        Queue:            fifo,
-        ListerWatcher:    lw,
-        ObjectType:       objType,
-        FullResyncPeriod: resyncPeriod,
-        RetryOnError:     false,
-
-        Process: func(obj interface{}) error {
-            // from oldest to newest
-            for _, d := range obj.(Deltas) {
-                switch d.Type {
-                case Sync, Added, Updated:
-                    if old, exists, err := clientState.Get(d.Object); err == nil && exists {
-                        if err := clientState.Update(d.Object); err != nil {
-                            return err
-                        }
-                        h.OnUpdate(old, d.Object)
-                    } else {
-                        if err := clientState.Add(d.Object); err != nil {
-                            return err
-                        }
-                        h.OnAdd(d.Object)
-                    }
-                case Deleted:
-                    if err := clientState.Delete(d.Object); err != nil {
-                        return err
-                    }
-                    h.OnDelete(d.Object)
-                }
-            }
-            return nil
-        },
-    }
-    return clientState, New(cfg)
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
 }
 
 // NewIndexerInformer returns a Indexer and a controller for populating the index
@@ -352,6 +317,30 @@ func NewIndexerInformer(
     // This will hold the client state, as we know it.
     clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 
+    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+}
+
+// newInformer returns a controller for populating the store while also
+// providing event notifications.
+//
+// Parameters
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//  * clientState is the store you want to populate
+//
+func newInformer(
+    lw ListerWatcher,
+    objType runtime.Object,
+    resyncPeriod time.Duration,
+    h ResourceEventHandler,
+    clientState Store,
+) Controller {
     // This will hold incoming changes. Note how we pass clientState in as a
     // KeyLister, that way resync operations will result in the correct set
     // of update/delete deltas.
@@ -390,5 +379,5 @@ func NewIndexerInformer(
             return nil
         },
     }
-    return clientState, New(cfg)
+    return New(cfg)
 }
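For orientation, the refactor above makes NewInformer and NewIndexerInformer share the unexported newInformer helper, while the public API is unchanged. A minimal sketch of driving that API; the kubeconfig path and the 30-second resync period are illustrative assumptions, not part of this change:

package main

import (
    "fmt"
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumes a reachable cluster via the default kubeconfig; adjust as needed.
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // The ListWatch feeds the controller's queue, which per the new Config
    // comment must be a DeltaFIFO.
    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceDefault, fields.Everything())

    store, controller := cache.NewInformer(lw, &v1.Pod{}, 30*time.Second, cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("add:", obj.(*v1.Pod).Name) },
        UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update:", newObj.(*v1.Pod).Name) },
        DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
    })

    stop := make(chan struct{})
    defer close(stop)
    go controller.Run(stop)

    // The returned Store is the informer's client-state cache.
    time.Sleep(5 * time.Second)
    fmt.Println("cached pods:", len(store.ListKeys()))
}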
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored): 27 changed lines
@@ -160,7 +160,7 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
     return f.keyFunc(obj)
 }
 
-// Return true if an Add/Update/Delete/AddIfNotPresent are called first,
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
 // or an Update called first but the first batch of items inserted by Replace() has been popped
 func (f *DeltaFIFO) HasSynced() bool {
     f.lock.Lock()
@@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta {
     return b
 }
 
-// willObjectBeDeletedLocked returns true only if the last delta for the
-// given object is Delete. Caller must lock first.
-func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
-    deltas := f.items[id]
-    return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
-}
-
 // queueActionLocked appends to the delta list for the object.
 // Caller must lock first.
 func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
@@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
         return KeyError{obj, err}
     }
 
-    // If object is supposed to be deleted (last event is Deleted),
-    // then we should ignore Sync events, because it would result in
-    // recreation of this object.
-    if actionType == Sync && f.willObjectBeDeletedLocked(id) {
-        return nil
-    }
-
     newDeltas := append(f.items[id], Delta{actionType, obj})
     newDeltas = dedupDeltas(newDeltas)
 
@@ -389,7 +375,7 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
     return d, exists, nil
 }
 
-// Checks if the queue is closed
+// IsClosed checks if the queue is closed
 func (f *DeltaFIFO) IsClosed() bool {
     f.closedLock.Lock()
     defer f.closedLock.Unlock()
@@ -417,7 +403,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
             // When Close() is called, the f.closed is set and the condition is broadcasted.
             // Which causes this loop to continue and return from the Pop().
             if f.IsClosed() {
-                return nil, FIFOClosedError
+                return nil, ErrFIFOClosed
             }
 
             f.cond.Wait()
@@ -466,6 +452,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 
     if f.knownObjects == nil {
         // Do deletion detection against our own list.
+        queuedDeletions := 0
         for k, oldItem := range f.items {
             if keys.Has(k) {
                 continue
@@ -474,6 +461,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
             if n := oldItem.Newest(); n != nil {
                 deletedObj = n.Object
             }
+            queuedDeletions++
             if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
                 return err
             }
@@ -481,7 +469,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 
         if !f.populated {
             f.populated = true
-            f.initialPopulationCount = len(list)
+            // While there shouldn't be any queued deletions in the initial
+            // population of the queue, it's better to be on the safe side.
+            f.initialPopulationCount = len(list) + queuedDeletions
         }
 
         return nil
@@ -589,6 +579,7 @@ type KeyGetter interface {
 // DeltaType is the type of a change (addition, deletion, etc)
 type DeltaType string
 
+// Change type definition
 const (
     Added   DeltaType = "Added"
     Updated DeltaType = "Updated"
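The FIFOClosedError sentinel is renamed ErrFIFOClosed in this bump (the rename itself lands in fifo.go below). A hedged sketch of the consumer loop the rename touches; the item type and keyFunc here are hypothetical:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

type item struct{ Name string }

func main() {
    // knownObjects is nil, so Replace() would do deletion detection against
    // the FIFO's own items (the path that now counts queuedDeletions).
    fifo := cache.NewDeltaFIFO(func(obj interface{}) (string, error) {
        return obj.(item).Name, nil
    }, nil)

    fifo.Add(item{Name: "a"})
    fifo.Close()

    for {
        _, err := fifo.Pop(func(obj interface{}) error {
            // Pop hands the consumer the accumulated Deltas for one key.
            deltas := obj.(cache.Deltas)
            fmt.Println("popped delta of type:", deltas.Newest().Type)
            return nil
        })
        if err == cache.ErrFIFOClosed { // formerly FIFOClosedError
            fmt.Println("queue closed, exiting loop")
            return
        }
        if err != nil {
            panic(err)
        }
    }
}

Pop drains items queued before Close() and only then returns the sentinel, so the loop above processes "a" before exiting.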
vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored): 63 changed lines
@@ -48,14 +48,14 @@ type ExpirationCache struct {
 // ExpirationPolicy dictates when an object expires. Currently only abstracted out
 // so unittests don't rely on the system clock.
 type ExpirationPolicy interface {
-    IsExpired(obj *timestampedEntry) bool
+    IsExpired(obj *TimestampedEntry) bool
 }
 
 // TTLPolicy implements a ttl based ExpirationPolicy.
 type TTLPolicy struct {
     // >0: Expire entries with an age > ttl
     // <=0: Don't expire any entry
-    Ttl time.Duration
+    TTL time.Duration
 
     // Clock used to calculate ttl expiration
     Clock clock.Clock
@@ -63,26 +63,30 @@ type TTLPolicy struct {
 
 // IsExpired returns true if the given object is older than the ttl, or it can't
 // determine its age.
-func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
-    return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
+func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
+    return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL
 }
 
-// timestampedEntry is the only type allowed in a ExpirationCache.
-type timestampedEntry struct {
-    obj       interface{}
-    timestamp time.Time
+// TimestampedEntry is the only type allowed in a ExpirationCache.
+// Keep in mind that it is not safe to share timestamps between computers.
+// Behavior may be inconsistent if you get a timestamp from the API Server and
+// use it on the client machine as part of your ExpirationCache.
+type TimestampedEntry struct {
+    Obj       interface{}
+    Timestamp time.Time
+    key       string
 }
 
-// getTimestampedEntry returns the timestampedEntry stored under the given key.
-func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
+// getTimestampedEntry returns the TimestampedEntry stored under the given key.
+func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
     item, _ := c.cacheStorage.Get(key)
-    if tsEntry, ok := item.(*timestampedEntry); ok {
+    if tsEntry, ok := item.(*TimestampedEntry); ok {
         return tsEntry, true
     }
     return nil, false
 }
 
-// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
+// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
 // already expired. It holds a write lock across deletion.
 func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
     // Prevent all inserts from the time we deem an item as "expired" to when we
@@ -95,11 +99,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
         return nil, false
     }
     if c.expirationPolicy.IsExpired(timestampedItem) {
-        klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
+        klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
         c.cacheStorage.Delete(key)
         return nil, false
     }
-    return timestampedItem.obj, true
+    return timestampedItem.Obj, true
 }
 
 // GetByKey returns the item stored under the key, or sets exists=false.
@@ -126,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} {
 
     list := make([]interface{}, 0, len(items))
     for _, item := range items {
-        obj := item.(*timestampedEntry).obj
-        if key, err := c.keyFunc(obj); err != nil {
-            list = append(list, obj)
-        } else if obj, exists := c.getOrExpire(key); exists {
+        key := item.(*TimestampedEntry).key
+        if obj, exists := c.getOrExpire(key); exists {
             list = append(list, obj)
         }
     }
@@ -144,14 +146,14 @@ func (c *ExpirationCache) ListKeys() []string {
 // Add timestamps an item and inserts it into the cache, overwriting entries
 // that might exist under the same key.
 func (c *ExpirationCache) Add(obj interface{}) error {
-    c.expirationLock.Lock()
-    defer c.expirationLock.Unlock()
-
     key, err := c.keyFunc(obj)
     if err != nil {
         return KeyError{obj, err}
     }
-    c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
+    c.expirationLock.Lock()
+    defer c.expirationLock.Unlock()
+
+    c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key})
     return nil
 }
 
@@ -163,12 +165,12 @@ func (c *ExpirationCache) Update(obj interface{}) error {
 
 // Delete removes an item from the cache.
 func (c *ExpirationCache) Delete(obj interface{}) error {
-    c.expirationLock.Lock()
-    defer c.expirationLock.Unlock()
     key, err := c.keyFunc(obj)
     if err != nil {
         return KeyError{obj, err}
     }
+    c.expirationLock.Lock()
+    defer c.expirationLock.Unlock()
     c.cacheStorage.Delete(key)
     return nil
 }
@@ -177,8 +179,6 @@ func (c *ExpirationCache) Delete(obj interface{}) error {
 // before attempting the replace operation. The replace operation will
 // delete the contents of the ExpirationCache `c`.
 func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
-    c.expirationLock.Lock()
-    defer c.expirationLock.Unlock()
     items := make(map[string]interface{}, len(list))
     ts := c.clock.Now()
     for _, item := range list {
@@ -186,8 +186,10 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
         if err != nil {
             return KeyError{item, err}
         }
-        items[key] = &timestampedEntry{item, ts}
+        items[key] = &TimestampedEntry{item, ts, key}
     }
+    c.expirationLock.Lock()
+    defer c.expirationLock.Unlock()
     c.cacheStorage.Replace(items, resourceVersion)
     return nil
 }
@@ -199,10 +201,15 @@ func (c *ExpirationCache) Resync() error {
 
 // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
 func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
+    return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
+}
+
+// NewExpirationStore creates and returns a ExpirationCache for a given policy
+func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
     return &ExpirationCache{
         cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
         keyFunc:          keyFunc,
         clock:            clock.RealClock{},
-        expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
+        expirationPolicy: expirationPolicy,
     }
 }
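With TimestampedEntry now exported and NewExpirationStore factored out, custom policies become possible, but the common TTL path is unchanged. A sketch of that path; the token type and the 100ms TTL are invented for illustration:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/tools/cache"
)

type token struct{ ID string }

func main() {
    // Entries older than 100ms are treated as absent; a TTL <= 0 would
    // disable expiry entirely, per the TTLPolicy comment above.
    store := cache.NewTTLStore(func(obj interface{}) (string, error) {
        return obj.(token).ID, nil
    }, 100*time.Millisecond)

    store.Add(token{ID: "t1"})

    if _, ok, _ := store.GetByKey("t1"); ok {
        fmt.Println("fresh entry is visible")
    }
    time.Sleep(150 * time.Millisecond)
    if _, ok, _ := store.GetByKey("t1"); !ok {
        fmt.Println("expired entry was dropped on read")
    }
}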
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (generated, vendored): 5 changed lines
@@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) {
     }
 }
 
+// FakeExpirationPolicy keeps the list for keys which never expires.
 type FakeExpirationPolicy struct {
     NeverExpire     sets.String
     RetrieveKeyFunc KeyFunc
 }
 
-func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
+// IsExpired used to check if object is expired.
+func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
     key, _ := p.RetrieveKeyFunc(obj)
     return !p.NeverExpire.Has(key)
 }
 
+// NewFakeExpirationStore creates a new instance for the ExpirationCache.
 func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
     cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
     return &ExpirationCache{
vendor/k8s.io/client-go/tools/cache/fake_custom_store.go (generated, vendored): 6 changed lines
@@ -16,7 +16,7 @@ limitations under the License.
 
 package cache
 
-// FakeStore lets you define custom functions for store operations
+// FakeCustomStore lets you define custom functions for store operations.
 type FakeCustomStore struct {
     AddFunc      func(obj interface{}) error
     UpdateFunc   func(obj interface{}) error
@@ -25,7 +25,7 @@ type FakeCustomStore struct {
     ListKeysFunc func() []string
     GetFunc      func(obj interface{}) (item interface{}, exists bool, err error)
     GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
-    ReplaceFunc  func(list []interface{}, resourceVerion string) error
+    ReplaceFunc  func(list []interface{}, resourceVersion string) error
     ResyncFunc   func() error
 }
 
@@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error {
 // Update calls the custom Update function if defined
 func (f *FakeCustomStore) Update(obj interface{}) error {
     if f.UpdateFunc != nil {
-        return f.Update(obj)
+        return f.UpdateFunc(obj)
     }
     return nil
 }
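The Update fix above matters: the old body called f.Update(obj) and recursed into itself forever instead of invoking the custom hook. A small sketch exercising the corrected delegation:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func main() {
    updates := 0
    fake := &cache.FakeCustomStore{
        // With the fix, Update delegates here exactly once per call.
        UpdateFunc: func(obj interface{}) error {
            updates++
            return nil
        },
    }

    // FakeCustomStore satisfies the Store interface, so it can stand in
    // for a real cache in tests.
    var store cache.Store = fake
    _ = store.Update("object")
    fmt.Println("UpdateFunc invocations:", updates) // 1
}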
vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored): 11 changed lines
@@ -34,7 +34,8 @@ type ErrRequeue struct {
     Err error
 }
 
-var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")
+// ErrFIFOClosed used when FIFO is closed
+var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue")
 
 func (e ErrRequeue) Error() string {
     if e.Err == nil {
@@ -66,7 +67,7 @@ type Queue interface {
     Close()
 }
 
-// Helper function for popping from Queue.
+// Pop is helper function for popping from Queue.
 // WARNING: Do NOT use this function in non-test code to avoid races
 // unless you really really really really know what you are doing.
 func Pop(queue Queue) interface{} {
@@ -126,7 +127,7 @@ func (f *FIFO) Close() {
     f.cond.Broadcast()
 }
 
-// Return true if an Add/Update/Delete/AddIfNotPresent are called first,
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
 // or an Update called first but the first batch of items inserted by Replace() has been popped
 func (f *FIFO) HasSynced() bool {
     f.lock.Lock()
@@ -242,7 +243,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
     return item, exists, nil
 }
 
-// Checks if the queue is closed
+// IsClosed checks if the queue is closed
 func (f *FIFO) IsClosed() bool {
     f.closedLock.Lock()
     defer f.closedLock.Unlock()
@@ -267,7 +268,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
             // When Close() is called, the f.closed is set and the condition is broadcasted.
             // Which causes this loop to continue and return from the Pop().
             if f.IsClosed() {
-                return nil, FIFOClosedError
+                return nil, ErrFIFOClosed
             }
 
             f.cond.Wait()
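The plain FIFO shares the renamed sentinel with DeltaFIFO. A sketch of the Close/Pop handshake between a producer and a consumer goroutine; the job type is a hypothetical payload:

package main

import (
    "fmt"
    "sync"

    "k8s.io/client-go/tools/cache"
)

type job struct{ Name string }

func main() {
    fifo := cache.NewFIFO(func(obj interface{}) (string, error) {
        return obj.(job).Name, nil
    })

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            _, err := fifo.Pop(func(obj interface{}) error {
                fmt.Println("processing", obj.(job).Name)
                return nil
            })
            if err == cache.ErrFIFOClosed {
                return // the sentinel's new name; FIFOClosedError is gone
            }
        }
    }()

    fifo.Add(job{Name: "a"})
    fifo.Add(job{Name: "b"})
    fifo.Close() // queued items are still drained before Pop returns the sentinel
    wg.Wait()
}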
vendor/k8s.io/client-go/tools/cache/heap.go (generated, vendored): 10 changed lines
@@ -28,7 +28,9 @@ const (
     closedMsg = "heap is closed"
 )
 
+// LessFunc is used to compare two objects in the heap.
 type LessFunc func(interface{}, interface{}) bool
+
 type heapItem struct {
     obj   interface{} // The object which is stored in the heap.
     index int         // The index of the object's key in the Heap.queue.
@@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error {
     return nil
 }
 
-// Adds all the items in the list to the queue and then signals the condition
+// BulkAdd adds all the items in the list to the queue and then signals the condition
 // variable. It is useful when the caller would like to add all of the items
 // to the queue before consumer starts processing them.
 func (h *Heap) BulkAdd(list []interface{}) error {
@@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) {
         h.cond.Wait()
     }
     obj := heap.Pop(h.data)
-    if obj != nil {
-        return obj, nil
-    } else {
+    if obj == nil {
         return nil, fmt.Errorf("object was removed from heap data")
     }
+
+    return obj, nil
 }
 
 // List returns a list of all the items.
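LessFunc (now documented above) orders the heap, and BulkAdd is the documented way to enqueue everything before any consumer starts. A sketch; the task type and its priorities are invented:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

type task struct {
    Name     string
    Priority int
}

func main() {
    // Lower Priority pops first under this LessFunc.
    h := cache.NewHeap(
        func(obj interface{}) (string, error) { return obj.(task).Name, nil },
        func(a, b interface{}) bool { return a.(task).Priority < b.(task).Priority },
    )

    // BulkAdd enqueues all items, then signals the condition variable once.
    if err := h.BulkAdd([]interface{}{
        task{Name: "low", Priority: 10},
        task{Name: "high", Priority: 1},
    }); err != nil {
        panic(err)
    }

    for i := 0; i < 2; i++ {
        obj, err := h.Pop()
        if err != nil {
            panic(err)
        }
        fmt.Println("popped:", obj.(task).Name) // high, then low
    }
}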
vendor/k8s.io/client-go/tools/cache/index.go (generated, vendored): 27 changed lines
@@ -23,17 +23,27 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
 )
 
-// Indexer is a storage interface that lets you list objects using multiple indexing functions
+// Indexer is a storage interface that lets you list objects using multiple indexing functions.
+// There are three kinds of strings here.
+// One is a storage key, as defined in the Store interface.
+// Another kind is a name of an index.
+// The third kind of string is an "indexed value", which is produced by an
+// IndexFunc and can be a field value or any other string computed from the object.
 type Indexer interface {
     Store
-    // Retrieve list of objects that match on the named indexing function
+    // Index returns the stored objects whose set of indexed values
+    // intersects the set of indexed values of the given object, for
+    // the named index
     Index(indexName string, obj interface{}) ([]interface{}, error)
-    // IndexKeys returns the set of keys that match on the named indexing function.
-    IndexKeys(indexName, indexKey string) ([]string, error)
-    // ListIndexFuncValues returns the list of generated values of an Index func
+    // IndexKeys returns the storage keys of the stored objects whose
+    // set of indexed values for the named index includes the given
+    // indexed value
+    IndexKeys(indexName, indexedValue string) ([]string, error)
+    // ListIndexFuncValues returns all the indexed values of the given index
    ListIndexFuncValues(indexName string) []string
-    // ByIndex lists object that match on the named indexing function with the exact key
-    ByIndex(indexName, indexKey string) ([]interface{}, error)
+    // ByIndex returns the stored objects whose set of indexed values
+    // for the named index includes the given indexed value
+    ByIndex(indexName, indexedValue string) ([]interface{}, error)
     // GetIndexer return the indexers
     GetIndexers() Indexers
 
@@ -42,7 +52,7 @@ type Indexer interface {
     AddIndexers(newIndexers Indexers) error
 }
 
-// IndexFunc knows how to provide an indexed value for an object.
+// IndexFunc knows how to compute the set of indexed values for an object.
 type IndexFunc func(obj interface{}) ([]string, error)
 
 // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
@@ -65,6 +75,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
 }
 
 const (
+    // NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field.
     NamespaceIndex string = "namespace"
 )
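In the reworded contract above, an IndexFunc yields a set of indexed values per object, and ByIndex returns the objects whose set includes the given value. A sketch using the built-in namespace index; the pod names are illustrative:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // MetaNamespaceIndexFunc maps each object to one indexed value: its namespace.
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
        cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
    })

    indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "default"}})
    indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "kube-system"}})

    // Returns the stored objects whose indexed values include "default".
    pods, err := indexer.ByIndex(cache.NamespaceIndex, "default")
    if err != nil {
        panic(err)
    }
    fmt.Println("pods in default:", len(pods)) // 1
}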
vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored): 23 changed lines
@@ -30,8 +30,16 @@ import (
 // AppendFunc is used to add a matching item to whatever list the caller is using
 type AppendFunc func(interface{})
 
+// ListAll calls appendFn with each value retrieved from store which matches the selector.
 func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+    selectAll := selector.Empty()
     for _, m := range store.List() {
+        if selectAll {
+            // Avoid computing labels of the objects to speed up common flows
+            // of listing all objects.
+            appendFn(m)
+            continue
+        }
         metadata, err := meta.Accessor(m)
         if err != nil {
             return err
@@ -43,9 +51,17 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
     return nil
 }
 
+// ListAllByNamespace used to list items belongs to namespace from Indexer.
 func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+    selectAll := selector.Empty()
     if namespace == metav1.NamespaceAll {
         for _, m := range indexer.List() {
+            if selectAll {
+                // Avoid computing labels of the objects to speed up common flows
+                // of listing all objects.
+                appendFn(m)
+                continue
+            }
             metadata, err := meta.Accessor(m)
             if err != nil {
                 return err
@@ -74,6 +90,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
         return nil
     }
     for _, m := range items {
+        if selectAll {
+            // Avoid computing labels of the objects to speed up common flows
+            // of listing all objects.
+            appendFn(m)
+            continue
+        }
         metadata, err := meta.Accessor(m)
         if err != nil {
             return err
@@ -104,6 +126,7 @@ type GenericNamespaceLister interface {
     Get(name string) (runtime.Object, error)
 }
 
+// NewGenericLister creates a new instance for the genericLister.
 func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
     return &genericLister{indexer: indexer, resource: resource}
 }
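The added selectAll fast path skips per-object label computation whenever the selector is empty. A sketch that takes that path; the indexer contents are illustrative:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/tools/cache"
)

func main() {
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
        cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
    })
    indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "default"}})

    // labels.Everything() is empty, so every item is appended without
    // touching its labels (the new fast path above).
    var names []string
    err := cache.ListAllByNamespace(indexer, "default", labels.Everything(), func(m interface{}) {
        names = append(names, m.(*v1.Pod).Name)
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(names) // [a]
}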
vendor/k8s.io/client-go/tools/cache/listwatch.go (generated, vendored): 14 changed lines
@@ -27,15 +27,25 @@ import (
     "k8s.io/client-go/tools/pager"
 )
 
-// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
-type ListerWatcher interface {
+// Lister is any object that knows how to perform an initial list.
+type Lister interface {
     // List should return a list type object; the Items field will be extracted, and the
     // ResourceVersion field will be used to start the watch in the right place.
     List(options metav1.ListOptions) (runtime.Object, error)
+}
 
+// Watcher is any object that knows how to start a watch on a resource.
+type Watcher interface {
     // Watch should begin a watch at the specified version.
     Watch(options metav1.ListOptions) (watch.Interface, error)
 }
 
+// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
+type ListerWatcher interface {
+    Lister
+    Watcher
+}
+
 // ListFunc knows how to list resources
 type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
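ListerWatcher is now the composition of the new Lister and Watcher interfaces, so a *ListWatch keeps satisfying existing call sites while code that only needs one half can depend on the narrower interface. A sketch, assuming a reachable cluster via the default kubeconfig:

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "services", metav1.NamespaceAll, fields.Everything())

    // The split interfaces can be consumed independently.
    var lister cache.Lister = lw
    var watcher cache.Watcher = lw

    if _, err := lister.List(metav1.ListOptions{}); err != nil {
        panic(err)
    }
    w, err := watcher.Watch(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    w.Stop()
}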
vendor/k8s.io/client-go/tools/cache/mutation_cache.go (generated, vendored): 1 changed line
@@ -42,6 +42,7 @@ type MutationCache interface {
     Mutation(interface{})
 }
 
+// ResourceVersionComparator is able to compare object versions.
 type ResourceVersionComparator interface {
     CompareResourceVersion(lhs, rhs runtime.Object) int
 }
vendor/k8s.io/client-go/tools/cache/mutation_detector.go (generated, vendored): 8 changed lines
@@ -36,12 +36,14 @@ func init() {
     mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
 }
 
-type CacheMutationDetector interface {
+// MutationDetector is able to monitor if the object be modified outside.
+type MutationDetector interface {
     AddObject(obj interface{})
     Run(stopCh <-chan struct{})
 }
 
-func NewCacheMutationDetector(name string) CacheMutationDetector {
+// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector.
+func NewCacheMutationDetector(name string) MutationDetector {
     if !mutationDetectionEnabled {
         return dummyMutationDetector{}
     }
@@ -114,7 +116,7 @@ func (d *defaultCacheMutationDetector) CompareObjects() {
     altered := false
     for i, obj := range d.cachedObjs {
         if !reflect.DeepEqual(obj.cached, obj.copied) {
-            fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
+            fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
             altered = true
         }
     }
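The interface is renamed MutationDetector while the constructor keeps its name, so callers only change type references. A sketch of how the detector is wired; note the KUBE_CACHE_MUTATION_DETECTOR flag is read at package init, so it must be set in the process environment before start, and the default detector reports (now side by side) and then panics on a detected mutation:

package main

import (
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // Run with KUBE_CACHE_MUTATION_DETECTOR=true; otherwise this returns
    // the no-op dummy detector and nothing is checked.
    detector := cache.NewCacheMutationDetector("example-informer")

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "a"}}
    detector.AddObject(pod) // caches a deep copy for later comparison

    stop := make(chan struct{})
    go detector.Run(stop) // periodically DeepEqual-compares cached copies

    // Mutating a cached object after handing it to the cache is the bug
    // this tool exists to catch; with the detector enabled it will report.
    pod.Labels = map[string]string{"oops": "mutated-after-caching"}
    time.Sleep(2 * time.Second)
    close(stop)
}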
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored): 143 changed lines
@@ -17,18 +17,13 @@ limitations under the License.
 package cache
 
 import (
+    "context"
     "errors"
     "fmt"
     "io"
     "math/rand"
-    "net"
-    "net/url"
     "reflect"
-    "strconv"
-    "strings"
     "sync"
-    "sync/atomic"
-    "syscall"
     "time"
 
     apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -37,10 +32,13 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/naming"
+    utilnet "k8s.io/apimachinery/pkg/util/net"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
+    "k8s.io/client-go/tools/pager"
     "k8s.io/klog"
+    "k8s.io/utils/trace"
 )
 
 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
@@ -69,6 +67,9 @@ type Reflector struct {
     lastSyncResourceVersion string
     // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
     lastSyncResourceVersionMutex sync.RWMutex
+    // WatchListPageSize is the requested chunk size of initial and resync watch lists.
+    // Defaults to pager.PageSize.
+    WatchListPageSize int64
 }
 
 var (
@@ -80,7 +81,7 @@ var (
 // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
 // The indexer is configured to key on namespace
 func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
-    indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
+    indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
    reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
     return indexer, reflector
 }
@@ -95,17 +96,10 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn
     return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
 }
 
-// reflectorDisambiguator is used to disambiguate started reflectors.
-// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
-var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
-
 // NewNamedReflector same as NewReflector, but with a specified name for logging
 func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-    reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
     r := &Reflector{
-        name: name,
-        // we need this to be unique per process (some names are still the same) but obvious who it belongs to
-        metrics:       newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
+        name:          name,
         listerWatcher: lw,
         store:         store,
         expectedType:  reflect.TypeOf(expectedType),
@@ -116,11 +110,6 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
     return r
 }
 
-func makeValidPrometheusMetricLabel(in string) string {
-    // this isn't perfect, but it removes our common characters
-    return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
-}
-
 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
 // call chains to NewReflector, so they'd be low entropy names for reflectors
 var internalPackages = []string{"client-go/tools/cache/"}
@@ -173,27 +162,64 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
         // to be served from cache and potentially be delayed relative to
         // etcd contents. Reflector framework will catch up via Watch() eventually.
         options := metav1.ListOptions{ResourceVersion: "0"}
-        r.metrics.numberOfLists.Inc()
-        start := r.clock.Now()
-        list, err := r.listerWatcher.List(options)
-        if err != nil {
-            return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+
+        if err := func() error {
+            initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
+            defer initTrace.LogIfLong(10 * time.Second)
+            var list runtime.Object
+            var err error
+            listCh := make(chan struct{}, 1)
+            panicCh := make(chan interface{}, 1)
+            go func() {
+                defer func() {
+                    if r := recover(); r != nil {
+                        panicCh <- r
+                    }
+                }()
+                // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
+                // list request will return the full response.
+                pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+                    return r.listerWatcher.List(opts)
+                }))
+                if r.WatchListPageSize != 0 {
+                    pager.PageSize = r.WatchListPageSize
+                }
+                // Pager falls back to full list if paginated list calls fail due to an "Expired" error.
+                list, err = pager.List(context.Background(), options)
+                close(listCh)
+            }()
+            select {
+            case <-stopCh:
+                return nil
+            case r := <-panicCh:
+                panic(r)
+            case <-listCh:
+            }
+            if err != nil {
+                return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+            }
+            initTrace.Step("Objects listed")
+            listMetaInterface, err := meta.ListAccessor(list)
+            if err != nil {
+                return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
+            }
+            resourceVersion = listMetaInterface.GetResourceVersion()
+            initTrace.Step("Resource version extracted")
+            items, err := meta.ExtractList(list)
+            if err != nil {
+                return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
+            }
+            initTrace.Step("Objects extracted")
+            if err := r.syncWith(items, resourceVersion); err != nil {
+                return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
+            }
+            initTrace.Step("SyncWith done")
+            r.setLastSyncResourceVersion(resourceVersion)
+            initTrace.Step("Resource version updated")
+            return nil
+        }(); err != nil {
+            return err
         }
-        r.metrics.listDuration.Observe(time.Since(start).Seconds())
-        listMetaInterface, err := meta.ListAccessor(list)
-        if err != nil {
-            return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
-        }
-        resourceVersion = listMetaInterface.GetResourceVersion()
-        items, err := meta.ExtractList(list)
-        if err != nil {
-            return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
-        }
-        r.metrics.numberOfItemsInList.Observe(float64(len(items)))
-        if err := r.syncWith(items, resourceVersion); err != nil {
-            return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
-        }
-        r.setLastSyncResourceVersion(resourceVersion)
 
         resyncerrc := make(chan error, 1)
         cancelCh := make(chan struct{})
@@ -237,9 +263,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             // We want to avoid situations of hanging watchers. Stop any wachers that do not
             // receive any events within the timeout window.
             TimeoutSeconds: &timeoutSeconds,
+            // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+            // Reflector doesn't assume bookmarks are returned at all (if the server do not support
+            // watch bookmarks, it will ignore this field).
+            AllowWatchBookmarks: true,
         }
 
-        r.metrics.numberOfWatches.Inc()
         w, err := r.listerWatcher.Watch(options)
         if err != nil {
             switch err {
@@ -254,20 +283,21 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             // It doesn't make sense to re-list all objects because most likely we will be able to restart
             // watch where we ended.
             // If that's the case wait and resend watch request.
-            if urlError, ok := err.(*url.Error); ok {
-                if opError, ok := urlError.Err.(*net.OpError); ok {
-                    if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
-                        time.Sleep(time.Second)
-                        continue
-                    }
-                }
+            if utilnet.IsConnectionRefused(err) {
+                time.Sleep(time.Second)
+                continue
             }
             return nil
         }
 
         if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
             if err != errorStopRequested {
-                klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+                switch {
+                case apierrs.IsResourceExpired(err):
+                    klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+                default:
+                    klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+                }
             }
             return nil
         }
@@ -291,11 +321,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err
     // Stopping the watcher should be idempotent and if we return from this function there's no way
     // we're coming back in with the same watch interface.
     defer w.Stop()
-    // update metrics
-    defer func() {
-        r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
-        r.metrics.watchDuration.Observe(time.Since(start).Seconds())
-    }()
 
 loop:
     for {
@@ -340,6 +365,8 @@ loop:
                 if err != nil {
                     utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
                 }
+            case watch.Bookmark:
+                // A `Bookmark` means watch has synced here, just update the resourceVersion
             default:
                 utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
             }
@@ -349,9 +376,8 @@ loop:
         }
     }
 
-    watchDuration := r.clock.Now().Sub(start)
+    watchDuration := r.clock.Since(start)
     if watchDuration < 1*time.Second && eventCount == 0 {
-        r.metrics.numberOfShortWatches.Inc()
         return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
     }
     klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
@@ -370,9 +396,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
     r.lastSyncResourceVersionMutex.Lock()
     defer r.lastSyncResourceVersionMutex.Unlock()
     r.lastSyncResourceVersion = v
-
-    rv, err := strconv.Atoi(v)
-    if err == nil {
-        r.metrics.lastResourceVersion.Set(float64(rv))
-    }
 }
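After this change the reflector's initial LIST goes through the chunking pager (honoring the new WatchListPageSize field), watch bookmarks are requested, and the per-reflector Prometheus plumbing is gone. A sketch of standalone Reflector usage, under the same default-kubeconfig assumption as above:

package main

import (
    "fmt"
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)

    r := cache.NewReflector(lw, &v1.Pod{}, store, 0)
    // New in this version: request the pager-backed initial LIST in chunks.
    // 500 is an illustrative page size, not a recommendation.
    r.WatchListPageSize = 500

    stop := make(chan struct{})
    defer close(stop)
    go r.Run(stop)

    time.Sleep(5 * time.Second)
    fmt.Println("resource version after initial sync:", r.LastSyncResourceVersion())
}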
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go (generated, vendored): 17 changed lines
@@ -94,23 +94,6 @@ var metricsFactory = struct {
     metricsProvider: noopMetricsProvider{},
 }
 
-func newReflectorMetrics(name string) *reflectorMetrics {
-    var ret *reflectorMetrics
-    if len(name) == 0 {
-        return ret
-    }
-    return &reflectorMetrics{
-        numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
-        listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
-        numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
-        numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
-        numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
-        watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
-        numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
-        lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
-    }
-}
-
 // SetReflectorMetricsProvider sets the metrics provider
 func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
     metricsFactory.setProviders.Do(func() {
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored): 144 changed lines
@@ -25,37 +25,124 @@ import (
     "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/client-go/util/buffer"
-    "k8s.io/client-go/util/retry"
+    "k8s.io/utils/buffer"
 
     "k8s.io/klog"
 )
 
-// SharedInformer has a shared data cache and is capable of distributing notifications for changes
-// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
-// one behavior change compared to a standard Informer. When you receive a notification, the cache
-// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
-// on the contents of the cache exactly matching the notification you've received in handler
-// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
-// has advantages over the broadcaster since it allows us to share a common cache across many
-// controllers. Extending the broadcaster would have required us keep duplicate caches for each
-// watch.
+// SharedInformer provides eventually consistent linkage of its
+// clients to the authoritative state of a given collection of
+// objects. An object is identified by its API group, kind/resource,
+// namespace, and name; the `ObjectMeta.UID` is not part of an
+// object's ID as far as this contract is concerned. One
+// SharedInformer provides linkage to objects of a particular API
+// group and kind/resource. The linked object collection of a
+// SharedInformer may be further restricted to one namespace and/or by
+// label selector and/or field selector.
+//
+// The authoritative state of an object is what apiservers provide
+// access to, and an object goes through a strict sequence of states.
+// An object state is either "absent" or present with a
+// ResourceVersion and other appropriate content.
+//
+// A SharedInformer gets object states from apiservers using a
+// sequence of LIST and WATCH operations. Through this sequence the
+// apiservers provide a sequence of "collection states" to the
+// informer, where each collection state defines the state of every
+// object of the collection. No promise --- beyond what is implied by
+// other remarks here --- is made about how one informer's sequence of
+// collection states relates to a different informer's sequence of
+// collection states.
+//
+// A SharedInformer maintains a local cache, exposed by GetStore() and
+// by GetIndexer() in the case of an indexed informer, of the state of
+// each relevant object. This cache is eventually consistent with the
+// authoritative state. This means that, unless prevented by
+// persistent communication problems, if ever a particular object ID X
+// is authoritatively associated with a state S then for every
+// SharedInformer I whose collection includes (X, S) eventually either
+// (1) I's cache associates X with S or a later state of X, (2) I is
+// stopped, or (3) the authoritative state service for X terminates.
+// To be formally complete, we say that the absent state meets any
+// restriction by label selector or field selector.
+//
+// The local cache starts out empty, and gets populated and updated
+// during `Run()`.
+//
+// As a simple example, if a collection of objects is henceforeth
+// unchanging, a SharedInformer is created that links to that
+// collection, and that SharedInformer is `Run()` then that
+// SharedInformer's cache eventually holds an exact copy of that
+// collection (unless it is stopped too soon, the authoritative state
+// service ends, or communication problems between the two
+// persistently thwart achievement).
+//
+// As another simple example, if the local cache ever holds a
+// non-absent state for some object ID and the object is eventually
+// removed from the authoritative state then eventually the object is
+// removed from the local cache (unless the SharedInformer is stopped
+// too soon, the authoritative state service ends, or communication
+// problems persistently thwart the desired result).
+//
+// The keys in the Store are of the form namespace/name for namespaced
+// objects, and are simply the name for non-namespaced objects.
+// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for
+// a given object, and `SplitMetaNamespaceKey(key)` to split a key
+// into its constituent parts.
+//
+// A client is identified here by a ResourceEventHandler. For every
+// update to the SharedInformer's local cache and for every client
+// added before `Run()`, eventually either the SharedInformer is
+// stopped or the client is notified of the update. A client added
+// after `Run()` starts gets a startup batch of notifications of
+// additions of the object existing in the cache at the time that
+// client was added; also, for every update to the SharedInformer's
+// local cache after that client was added, eventually either the
+// SharedInformer is stopped or that client is notified of that
+// update. Client notifications happen after the corresponding cache
+// update and, in the case of a SharedIndexInformer, after the
+// corresponding index updates. It is possible that additional cache
+// and index updates happen before such a prescribed notification.
+// For a given SharedInformer and client, the notifications are
+// delivered sequentially. For a given SharedInformer, client, and
+// object ID, the notifications are delivered in order.
+//
+// A client must process each notification promptly; a SharedInformer
+// is not engineered to deal well with a large backlog of
+// notifications to deliver. Lengthy processing should be passed off
+// to something else, for example through a
+// `client-go/util/workqueue`.
+//
+// Each query to an informer's local cache --- whether a single-object
+// lookup, a list operation, or a use of one of its indices --- is
+// answered entirely from one of the collection states received by
+// that informer.
+//
+// A delete notification exposes the last locally known non-absent
+// state, except that its ResourceVersion is replaced with a
+// ResourceVersion in which the object is actually absent.
 type SharedInformer interface {
     // AddEventHandler adds an event handler to the shared informer using the shared informer's resync
     // period. Events to a single handler are delivered sequentially, but there is no coordination
     // between different handlers.
     AddEventHandler(handler ResourceEventHandler)
-    // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
-    // specified resync period. Events to a single handler are delivered sequentially, but there is
-    // no coordination between different handlers.
+    // AddEventHandlerWithResyncPeriod adds an event handler to the
+    // shared informer using the specified resync period. The resync
+    // operation consists of delivering to the handler a create
+    // notification for every object in the informer's local cache; it
+    // does not add any interactions with the authoritative storage.
     AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
-    // GetStore returns the Store.
+    // GetStore returns the informer's local cache as a Store.
     GetStore() Store
     // GetController gives back a synthetic interface that "votes" to start the informer
     GetController() Controller
-    // Run starts the shared informer, which will be stopped when stopCh is closed.
+    // Run starts and runs the shared informer, returning after it stops.
+    // The informer will be stopped when stopCh is closed.
     Run(stopCh <-chan struct{})
-    // HasSynced returns true if the shared informer's store has synced.
+    // HasSynced returns true if the shared informer's store has been
+    // informed by at least one full LIST of the authoritative state
+    // of the informer's object collection. This is unrelated to "resync".
     HasSynced() bool
     // LastSyncResourceVersion is the resource version observed when last synced with the underlying
     // store. The value returned is not synchronized with access to the underlying store and is not
@@ -63,6 +150,7 @@ type SharedInformer interface {
     LastSyncResourceVersion() string
 }
 
+// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
 type SharedIndexInformer interface {
     SharedInformer
     // AddIndexers add indexers to the informer before it starts.
@@ -102,10 +190,26 @@ const (
     initialBufferSize = 1024
 )
 
+// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
+// indicating that the caller identified by name is waiting for syncs, followed by
+// either a successful or failed sync.
+func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
+    klog.Infof("Waiting for caches to sync for %s", controllerName)
+
+    if !WaitForCacheSync(stopCh, cacheSyncs...) {
+        utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
+        return false
+    }
+
+    klog.Infof("Caches are synced for %s ", controllerName)
+    return true
+}
+
 // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
 // if the controller should shutdown
+// callers should prefer WaitForNamedCacheSync()
 func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
-    err := wait.PollUntil(syncedPollPeriod,
+    err := wait.PollImmediateUntil(syncedPollPeriod,
         func() (bool, error) {
             for _, syncFunc := range cacheSyncs {
                 if !syncFunc() {
@@ -129,7 +233,7 @@ type sharedIndexInformer struct {
     controller Controller
 
     processor             *sharedProcessor
-    cacheMutationDetector CacheMutationDetector
+    cacheMutationDetector MutationDetector
 
     // This block is tracked to handle late initialization of the controller
     listerWatcher ListerWatcher
@@ -169,7 +273,7 @@ func (v *dummyController) HasSynced() bool {
     return v.informer.HasSynced()
 }
 
-func (c *dummyController) LastSyncResourceVersion() string {
+func (v *dummyController) LastSyncResourceVersion() string {
     return ""
 }
 
@@ -555,7 +659,7 @@ func (p *processorListener) run() {
             case deleteNotification:
                 p.handler.OnDelete(notification.oldObj)
             default:
-                utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
+                utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
             }
         }
     // the only way to get here is if the p.nextCh is empty and closed
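WaitForNamedCacheSync is new in this version and wraps WaitForCacheSync with the waiting/synced log lines shown above. A sketch wiring it to a shared index informer; the controller name and the default-kubeconfig assumption are illustrative:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
    informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 0, cache.Indexers{
        cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
    })

    informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) { fmt.Println("add:", obj.(*v1.Pod).Name) },
    })

    stop := make(chan struct{})
    defer close(stop)
    go informer.Run(stop)

    // Logs who is waiting and whether the sync succeeded.
    if !cache.WaitForNamedCacheSync("example-controller", stop, informer.HasSynced) {
        return
    }
    fmt.Println("cache synced; local keys:", len(informer.GetStore().ListKeys()))
}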
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored): 22 changed lines
@@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
     }
     index := c.indices[indexName]
 
-    // need to de-dupe the return list. Since multiple keys are allowed, this can happen.
-    returnKeySet := sets.String{}
-    for _, indexKey := range indexKeys {
-        set := index[indexKey]
-        for _, key := range set.UnsortedList() {
-            returnKeySet.Insert(key)
+    var returnKeySet sets.String
+    if len(indexKeys) == 1 {
+        // In majority of cases, there is exactly one value matching.
+        // Optimize the most common path - deduping is not needed here.
+        returnKeySet = index[indexKeys[0]]
+    } else {
+        // Need to de-dupe the return list.
+        // Since multiple keys are allowed, this can happen.
+        returnKeySet = sets.String{}
+        for _, indexKey := range indexKeys {
+            for key := range index[indexKey] {
+                returnKeySet.Insert(key)
+            }
         }
     }
 
@@ -178,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
 
     set := index[indexKey]
     list := make([]interface{}, 0, set.Len())
-    for _, key := range set.List() {
+    for key := range set {
         list = append(list, c.items[key])
     }
 
@@ -295,6 +302,7 @@ func (c *threadSafeMap) Resync() error {
     return nil
 }
 
+// NewThreadSafeStore creates a new instance of ThreadSafeStore.
 func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
     return &threadSafeMap{
         items: map[string]interface{}{},
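The Index change above short-circuits the single-value case: when an object has exactly one indexed value, the matching key set is returned directly and the de-dupe loop never runs. A sketch using the lower-level store; the widget type and byColor index are hypothetical:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

type widget struct{ Color string }

func main() {
    // One IndexFunc, "byColor", maps each stored object to a single
    // indexed value, so lookups take the optimized path.
    store := cache.NewThreadSafeStore(cache.Indexers{
        "byColor": func(obj interface{}) ([]string, error) {
            return []string{obj.(widget).Color}, nil
        },
    }, cache.Indices{})

    store.Add("w1", widget{Color: "red"})
    store.Add("w2", widget{Color: "blue"})
    store.Add("w3", widget{Color: "red"})

    red, err := store.ByIndex("byColor", "red")
    if err != nil {
        panic(err)
    }
    fmt.Println("red widgets:", len(red)) // 2
}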
vendor/k8s.io/client-go/tools/cache/undelta_store.go (generated, vendored): 8 changed lines
@@ -31,6 +31,7 @@ type UndeltaStore struct {
 // Assert that it implements the Store interface.
 var _ Store = &UndeltaStore{}
 
+// Add inserts an object into the store and sends complete state by calling PushFunc.
 // Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
 // In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc}
 // and the List. So, the following can happen, resulting in two identical calls to PushFunc.
@@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{}
 // 3  Store.Add(b)
 // 4  Store.List() -> [a,b]
 // 5  Store.List() -> [a,b]
-
 func (u *UndeltaStore) Add(obj interface{}) error {
     if err := u.Store.Add(obj); err != nil {
         return err
@@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error {
     return nil
 }
 
+// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc.
 func (u *UndeltaStore) Update(obj interface{}) error {
     if err := u.Store.Update(obj); err != nil {
         return err
@@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error {
     return nil
 }
 
+// Delete removes an item from the cache and sends complete state by calling PushFunc.
 func (u *UndeltaStore) Delete(obj interface{}) error {
     if err := u.Store.Delete(obj); err != nil {
         return err
@@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error {
     return nil
 }
 
+// Replace will delete the contents of current store, using instead the given list.
+// 'u' takes ownership of the list, you should not reference the list again
+// after calling this function.
+// The new contents complete state will be sent by calling PushFunc after replacement.
 func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
     if err := u.Store.Replace(list, resourceVersion); err != nil {
         return err
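As the newly added doc comments spell out, every mutation funnels into PushFunc with the complete post-mutation state. A sketch; the string keyFunc is a stand-in for a real key extractor:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func main() {
    // PushFunc receives the full contents of the store after each change.
    push := func(state []interface{}) {
        fmt.Println("snapshot size:", len(state))
    }
    store := cache.NewUndeltaStore(push, func(obj interface{}) (string, error) {
        return obj.(string), nil
    })

    store.Add("a")                           // snapshot size: 1
    store.Add("b")                           // snapshot size: 2
    store.Replace([]interface{}{"c"}, "rv1") // snapshot size: 1
    store.Delete("c")                        // snapshot size: 0
}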