feat: kubesphere 4.0 (#6115)
* feat: kubesphere 4.0
  Signed-off-by: ci-bot <ci-bot@kubesphere.io>
* feat: kubesphere 4.0
  Signed-off-by: ci-bot <ci-bot@kubesphere.io>
---------
Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
Parent: b5015ec7b9
Commit: 447a51f08b
vendor/k8s.io/client-go/tools/cache/OWNERS (4 changes, generated, vendored)

@@ -2,7 +2,6 @@
 approvers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
@@ -11,7 +10,6 @@ approvers:
   - ncdc
 reviewers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
@@ -26,3 +24,5 @@ reviewers:
   - dims
   - ingvagabund
   - ncdc
+emeritus_approvers:
+  - lavalamp
vendor/k8s.io/client-go/tools/cache/controller.go (88 changes, generated, vendored)

@@ -50,11 +50,12 @@ type Config struct {
 	Process ProcessFunc
 
 	// ObjectType is an example object of the type this controller is
-	// expected to handle. Only the type needs to be right, except
-	// that when that is `unstructured.Unstructured` the object's
-	// `"apiVersion"` and `"kind"` must also be right.
+	// expected to handle.
 	ObjectType runtime.Object
 
+	// ObjectDescription is the description to use when logging type-specific information about this controller.
+	ObjectDescription string
+
 	// FullResyncPeriod is the period at which ShouldResync is considered.
 	FullResyncPeriod time.Duration
 
@@ -84,7 +85,7 @@ type Config struct {
 type ShouldResyncFunc func() bool
 
 // ProcessFunc processes a single object.
-type ProcessFunc func(obj interface{}) error
+type ProcessFunc func(obj interface{}, isInInitialList bool) error
 
 // `*controller` implements Controller
 type controller struct {
@@ -131,15 +132,18 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 		<-stopCh
 		c.config.Queue.Close()
 	}()
-	r := NewReflector(
+	r := NewReflectorWithOptions(
 		c.config.ListerWatcher,
 		c.config.ObjectType,
 		c.config.Queue,
-		c.config.FullResyncPeriod,
+		ReflectorOptions{
+			ResyncPeriod:    c.config.FullResyncPeriod,
+			TypeDescription: c.config.ObjectDescription,
+			Clock:           c.clock,
+		},
 	)
 	r.ShouldResync = c.config.ShouldResync
 	r.WatchListPageSize = c.config.WatchListPageSize
-	r.clock = c.clock
 	if c.config.WatchErrorHandler != nil {
 		r.watchErrorHandler = c.config.WatchErrorHandler
 	}
@@ -211,7 +215,7 @@ func (c *controller) processLoop() {
 // happen if the watch is closed and misses the delete event and we don't
 // notice the deletion until the subsequent re-list.
 type ResourceEventHandler interface {
-	OnAdd(obj interface{})
+	OnAdd(obj interface{}, isInInitialList bool)
 	OnUpdate(oldObj, newObj interface{})
 	OnDelete(obj interface{})
 }
@@ -220,6 +224,9 @@ type ResourceEventHandler interface {
 // as few of the notification functions as you want while still implementing
 // ResourceEventHandler. This adapter does not remove the prohibition against
 // modifying the objects.
+//
+// See ResourceEventHandlerDetailedFuncs if your use needs to propagate
+// HasSynced.
 type ResourceEventHandlerFuncs struct {
 	AddFunc    func(obj interface{})
 	UpdateFunc func(oldObj, newObj interface{})
@@ -227,7 +234,7 @@ type ResourceEventHandlerFuncs struct {
 }
 
 // OnAdd calls AddFunc if it's not nil.
-func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) {
 	if r.AddFunc != nil {
 		r.AddFunc(obj)
 	}
@@ -247,6 +254,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
 	}
 }
 
+// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs
+// except its AddFunc accepts the isInInitialList parameter, for propagating
+// HasSynced.
+type ResourceEventHandlerDetailedFuncs struct {
+	AddFunc    func(obj interface{}, isInInitialList bool)
+	UpdateFunc func(oldObj, newObj interface{})
+	DeleteFunc func(obj interface{})
+}
+
+// OnAdd calls AddFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) {
+	if r.AddFunc != nil {
+		r.AddFunc(obj, isInInitialList)
+	}
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) {
+	if r.UpdateFunc != nil {
+		r.UpdateFunc(oldObj, newObj)
+	}
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) {
+	if r.DeleteFunc != nil {
+		r.DeleteFunc(obj)
+	}
+}
+
 // FilteringResourceEventHandler applies the provided filter to all events coming
 // in, ensuring the appropriate nested handler method is invoked. An object
 // that starts passing the filter after an update is considered an add, and an
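The new isInInitialList parameter is what lets informers propagate HasSynced through event handlers. A minimal sketch of a consumer opting in through the new adapter (the handler bodies and printed messages are illustrative assumptions, not code from this commit):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// newDetailedHandler distinguishes objects replayed from the informer's
// initial list from objects added later at runtime. Existing
// cache.ResourceEventHandlerFuncs callers keep compiling unchanged; that
// adapter's OnAdd simply drops the new flag.
func newDetailedHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			if isInInitialList {
				fmt.Printf("replayed during initial sync: %v\n", obj)
				return
			}
			fmt.Printf("added after sync: %v\n", obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated") },
		DeleteFunc: func(obj interface{}) { fmt.Println("deleted") },
	}
}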
@@ -258,11 +295,11 @@ type FilteringResourceEventHandler struct {
 }
 
 // OnAdd calls the nested handler only if the filter succeeds
-func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) {
 	if !r.FilterFunc(obj) {
 		return
 	}
-	r.Handler.OnAdd(obj)
+	r.Handler.OnAdd(obj, isInInitialList)
 }
 
 // OnUpdate ensures the proper handler is called depending on whether the filter matches
@@ -273,7 +310,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
 	case newer && older:
 		r.Handler.OnUpdate(oldObj, newObj)
 	case newer && !older:
-		r.Handler.OnAdd(newObj)
+		r.Handler.OnAdd(newObj, false)
 	case !newer && older:
 		r.Handler.OnDelete(oldObj)
 	default:
@@ -353,17 +390,6 @@ func NewIndexerInformer(
 	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }
 
-// TransformFunc allows for transforming an object before it will be processed
-// and put into the controller cache and before the corresponding handlers will
-// be called on it.
-// TransformFunc (similarly to ResourceEventHandler functions) should be able
-// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown
-//
-// The most common usage pattern is to clean-up some parts of the object to
-// reduce component memory usage if a given component doesn't care about them.
-// given controller doesn't care for them
-type TransformFunc func(interface{}) (interface{}, error)
-
 // NewTransformingInformer returns a Store and a controller for populating
 // the store while also providing event notifications. You should only used
 // the returned Store for Get/List operations; Add/Modify/Deletes will cause
@@ -411,19 +437,12 @@ func processDeltas(
 	// Object which receives event notifications from the given deltas
 	handler ResourceEventHandler,
 	clientState Store,
-	transformer TransformFunc,
 	deltas Deltas,
+	isInInitialList bool,
 ) error {
 	// from oldest to newest
 	for _, d := range deltas {
 		obj := d.Object
-		if transformer != nil {
-			var err error
-			obj, err = transformer(obj)
-			if err != nil {
-				return err
-			}
-		}
 
 		switch d.Type {
 		case Sync, Replaced, Added, Updated:
@@ -436,7 +455,7 @@ func processDeltas(
 				if err := clientState.Add(obj); err != nil {
 					return err
 				}
-				handler.OnAdd(obj)
+				handler.OnAdd(obj, isInInitialList)
 			}
 		case Deleted:
 			if err := clientState.Delete(obj); err != nil {
@@ -475,6 +494,7 @@ func newInformer(
 	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
 		KnownObjects:          clientState,
 		EmitDeltaTypeReplaced: true,
+		Transformer:           transformer,
 	})
 
 	cfg := &Config{
@@ -484,9 +504,9 @@ func newInformer(
 		FullResyncPeriod: resyncPeriod,
 		RetryOnError:     false,
 
-		Process: func(obj interface{}) error {
+		Process: func(obj interface{}, isInInitialList bool) error {
 			if deltas, ok := obj.(Deltas); ok {
-				return processDeltas(h, clientState, transformer, deltas)
+				return processDeltas(h, clientState, deltas, isInInitialList)
 			}
 			return errors.New("object given as Process argument is not Deltas")
 		},
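Code that assembles a low-level controller by hand sees the same signature change in Config.Process. A sketch under the assumption that lw, objType, clientState, stopCh, and a hypothetical handleDeltas helper already exist in the caller:

fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
	KnownObjects:          clientState,
	EmitDeltaTypeReplaced: true,
})
cfg := &cache.Config{
	Queue:            fifo,
	ListerWatcher:    lw,
	ObjectType:       objType,
	FullResyncPeriod: 0,
	// The isInInitialList argument is the new part of this change.
	Process: func(obj interface{}, isInInitialList bool) error {
		deltas, ok := obj.(cache.Deltas)
		if !ok {
			return errors.New("object given as Process argument is not Deltas")
		}
		return handleDeltas(deltas, isInInitialList) // hypothetical helper
	},
}
go cache.New(cfg).Run(stopCh)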
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (142 changes, generated, vendored)

@@ -51,6 +51,10 @@ type DeltaFIFOOptions struct {
 	// When true, `Replaced` events will be sent for items passed to a Replace() call.
 	// When false, `Sync` events will be sent instead.
 	EmitDeltaTypeReplaced bool
+
+	// If set, will be called for objects before enqueueing them. Please
+	// see the comment on TransformFunc for details.
+	Transformer TransformFunc
 }
 
 // DeltaFIFO is like FIFO, but differs in two ways. One is that the
@@ -129,8 +133,32 @@ type DeltaFIFO struct {
 	// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
 	// DeltaType when Replace() is called (to preserve backwards compat).
 	emitDeltaTypeReplaced bool
+
+	// Called with every object if non-nil.
+	transformer TransformFunc
 }
 
+// TransformFunc allows for transforming an object before it will be processed.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// New in v1.27: In such cases, the contained object will already have gone
+// through the transform object separately (when it was added / updated prior
+// to the delete), so the TransformFunc can likely safely ignore such objects
+// (i.e., just return the input object).
+//
+// The most common usage pattern is to clean-up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+//
+// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc
+// sees the object before any other actor, and it is now safe to mutate the
+// object in place instead of making a copy.
+//
+// Note that TransformFunc is called while inserting objects into the
+// notification queue and is therefore extremely performance sensitive; please
+// do not do anything that will take a long time.
+type TransformFunc func(interface{}) (interface{}, error)
+
 // DeltaType is the type of a change (addition, deletion, etc)
 type DeltaType string
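A typical transformer, per the comment above, prunes fields the consumer never reads; managedFields is the usual candidate. A sketch (the function name and the decision to pass tombstones through untouched are the caller's choices, not part of this commit):

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields drops metadata.managedFields before objects are queued,
// trading a little CPU on the hot enqueue path for lower cache memory use.
func stripManagedFields(obj interface{}) (interface{}, error) {
	// Tombstones wrap an object that already went through the transformer,
	// so return them unchanged, as the TransformFunc comment advises.
	if _, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return obj, nil
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return obj, nil // not an API object; leave it alone
	}
	accessor.SetManagedFields(nil)
	return obj, nil
}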
@@ -227,6 +255,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
 		knownObjects: opts.KnownObjects,
 
 		emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
+		transformer:           opts.Transformer,
 	}
 	f.cond.L = &f.lock
 	return f
@@ -271,6 +300,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
 func (f *DeltaFIFO) HasSynced() bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
+	return f.hasSynced_locked()
+}
+
+func (f *DeltaFIFO) hasSynced_locked() bool {
 	return f.populated && f.initialPopulationCount == 0
 }
@@ -411,6 +444,21 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 	if err != nil {
 		return KeyError{obj, err}
 	}
+
+	// Every object comes through this code path once, so this is a good
+	// place to call the transform func. If obj is a
+	// DeletedFinalStateUnknown tombstone, then the containted inner object
+	// will already have gone through the transformer, but we document that
+	// this can happen. In cases involving Replace(), such an object can
+	// come through multiple times.
+	if f.transformer != nil {
+		var err error
+		obj, err = f.transformer(obj)
+		if err != nil {
+			return err
+		}
+	}
+
 	oldDeltas := f.items[id]
 	newDeltas := append(oldDeltas, Delta{actionType, obj})
 	newDeltas = dedupDeltas(newDeltas)
@@ -526,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 
 			f.cond.Wait()
 		}
+		isInInitialList := !f.hasSynced_locked()
 		id := f.queue[0]
 		f.queue = f.queue[1:]
 		depth := len(f.queue)
@@ -551,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 				utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
 			defer trace.LogIfLong(100 * time.Millisecond)
 		}
-		err := process(item)
+		err := process(item, isInInitialList)
 		if e, ok := err.(ErrRequeue); ok {
 			f.addIfNotPresent(id, item)
 			err = e.Err
@@ -566,12 +615,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // using the Sync or Replace DeltaType and then (2) it does some deletions.
 // In particular: for every pre-existing key K that is not the key of
 // an object in `list` there is the effect of
-// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
-// of K. If `f.knownObjects == nil` then the pre-existing keys are
-// those in `f.items` and the current object of K is the `.Newest()`
-// of the Deltas associated with K. Otherwise the pre-existing keys
-// are those listed by `f.knownObjects` and the current object of K is
-// what `f.knownObjects.GetByKey(K)` returns.
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known
+// object of K. The pre-existing keys are those in the union set of the keys in
+// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is
+// the one present in the last delta in `f.items`. If there is no delta for K
+// in `f.items`, it is the object in `f.knownObjects`
 func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -595,56 +643,54 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
 		}
 	}
 
-	if f.knownObjects == nil {
-		// Do deletion detection against our own list.
-		queuedDeletions := 0
-		for k, oldItem := range f.items {
-			if keys.Has(k) {
-				continue
-			}
-			// Delete pre-existing items not in the new list.
-			// This could happen if watch deletion event was missed while
-			// disconnected from apiserver.
-			var deletedObj interface{}
-			if n := oldItem.Newest(); n != nil {
-				deletedObj = n.Object
-			}
-			queuedDeletions++
-			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
-				return err
-			}
-		}
-
-		if !f.populated {
-			f.populated = true
-			// While there shouldn't be any queued deletions in the initial
-			// population of the queue, it's better to be on the safe side.
-			f.initialPopulationCount = keys.Len() + queuedDeletions
-		}
-
-		return nil
-	}
-
-	// Detect deletions not already in the queue.
-	knownKeys := f.knownObjects.ListKeys()
-	queuedDeletions := 0
-	for _, k := range knownKeys {
-		if keys.Has(k) {
-			continue
-		}
-
-		deletedObj, exists, err := f.knownObjects.GetByKey(k)
-		if err != nil {
-			deletedObj = nil
-			klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
-		} else if !exists {
-			deletedObj = nil
-			klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
-		}
-		queuedDeletions++
-		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
-			return err
-		}
-	}
-
-	if !f.populated {
+	// Do deletion detection against objects in the queue
+	queuedDeletions := 0
+	for k, oldItem := range f.items {
+		if keys.Has(k) {
+			continue
+		}
+		// Delete pre-existing items not in the new list.
+		// This could happen if watch deletion event was missed while
+		// disconnected from apiserver.
+		var deletedObj interface{}
+		if n := oldItem.Newest(); n != nil {
+			deletedObj = n.Object
+
+			// if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object
+			if d, ok := deletedObj.(DeletedFinalStateUnknown); ok {
+				deletedObj = d.Obj
+			}
+		}
+		queuedDeletions++
+		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+			return err
+		}
+	}
+
+	if f.knownObjects != nil {
+		// Detect deletions for objects not present in the queue, but present in KnownObjects
+		knownKeys := f.knownObjects.ListKeys()
+		for _, k := range knownKeys {
+			if keys.Has(k) {
+				continue
+			}
+			if len(f.items[k]) > 0 {
+				continue
+			}
+
+			deletedObj, exists, err := f.knownObjects.GetByKey(k)
+			if err != nil {
+				deletedObj = nil
+				klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+			} else if !exists {
+				deletedObj = nil
+				klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+			}
+			queuedDeletions++
+			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+				return err
+			}
+		}
+	}
+
+	if !f.populated {
+		f.populated = true
+		// While there shouldn't be any queued deletions in the initial
+		// population of the queue, it's better to be on the safe side.
+		f.initialPopulationCount = keys.Len() + queuedDeletions
+	}
+
+	return nil
+}
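Wiring the new option together with the existing ones, a queue that detects deletions against a known-objects store and transforms on enqueue could look like this (clientState and the stripManagedFields sketch above are assumptions):

fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
	KnownObjects:          clientState,        // a cache.Store holding the current state
	EmitDeltaTypeReplaced: true,               // emit Replaced instead of legacy Sync on Replace()
	Transformer:           stripManagedFields, // runs once per object inside queueActionLocked
})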
vendor/k8s.io/client-go/tools/cache/fifo.go (14 changes, generated, vendored)

@@ -25,7 +25,7 @@ import (
 
 // PopProcessFunc is passed to Pop() method of Queue interface.
 // It is supposed to process the accumulator popped from the queue.
-type PopProcessFunc func(interface{}) error
+type PopProcessFunc func(obj interface{}, isInInitialList bool) error
 
 // ErrRequeue may be returned by a PopProcessFunc to safely requeue
 // the current item. The value of Err will be returned from Pop.
@@ -82,9 +82,12 @@ type Queue interface {
 // Pop is helper function for popping from Queue.
 // WARNING: Do NOT use this function in non-test code to avoid races
 // unless you really really really really know what you are doing.
+//
+// NOTE: This function is deprecated and may be removed in the future without
+// additional warning.
 func Pop(queue Queue) interface{} {
 	var result interface{}
-	queue.Pop(func(obj interface{}) error {
+	queue.Pop(func(obj interface{}, isInInitialList bool) error {
 		result = obj
 		return nil
 	})
@@ -149,6 +152,10 @@ func (f *FIFO) Close() {
 func (f *FIFO) HasSynced() bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
+	return f.hasSynced_locked()
+}
+
+func (f *FIFO) hasSynced_locked() bool {
 	return f.populated && f.initialPopulationCount == 0
 }
@@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
 
 			f.cond.Wait()
 		}
+		isInInitialList := !f.hasSynced_locked()
 		id := f.queue[0]
 		f.queue = f.queue[1:]
 		if f.initialPopulationCount > 0 {
@@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
 			continue
 		}
 		delete(f.items, id)
-		err := process(item)
+		err := process(item, isInInitialList)
 		if e, ok := err.(ErrRequeue); ok {
 			f.addIfNotPresent(id, item)
 			err = e.Err
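Callers of Pop on either queue migrate by accepting the extra parameter, which is true exactly while the initial population is being drained. A sketch, assuming fifo was built as in the earlier sketch:

item, err := fifo.Pop(func(obj interface{}, isInInitialList bool) error {
	// isInInitialList is derived from hasSynced_locked() before the item is
	// handed over, so it flips to false once initialPopulationCount hits zero.
	if isInInitialList {
		// e.g. defer expensive side effects until after the initial sync
	}
	return nil
})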
vendor/k8s.io/client-go/tools/cache/object-names.go (new file, 65 lines, generated, vendored)

@@ -0,0 +1,65 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"k8s.io/apimachinery/pkg/types"
)

// ObjectName is a reference to an object of some implicit kind
type ObjectName struct {
	Namespace string
	Name      string
}

// NewObjectName constructs a new one
func NewObjectName(namespace, name string) ObjectName {
	return ObjectName{Namespace: namespace, Name: name}
}

// Parts is the inverse of the constructor
func (objName ObjectName) Parts() (namespace, name string) {
	return objName.Namespace, objName.Name
}

// String returns the standard string encoding,
// which is designed to match the historical behavior of MetaNamespaceKeyFunc.
// Note this behavior is different from the String method of types.NamespacedName.
func (objName ObjectName) String() string {
	if len(objName.Namespace) > 0 {
		return objName.Namespace + "/" + objName.Name
	}
	return objName.Name
}

// ParseObjectName tries to parse the standard encoding
func ParseObjectName(str string) (ObjectName, error) {
	var objName ObjectName
	var err error
	objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str)
	return objName, err
}

// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName
func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName {
	return NewObjectName(nn.Namespace, nn.Name)
}

// AsNamespacedName rebrands as a NamespacedName
func (objName ObjectName) AsNamespacedName() types.NamespacedName {
	return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name}
}
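ObjectName's string form is namespace-qualified only when a namespace is set, so it round-trips through the keys MetaNamespaceKeyFunc produces for informer caches. For example:

name := cache.NewObjectName("kube-system", "coredns")
key := name.String() // "kube-system/coredns", the same shape MetaNamespaceKeyFunc emits

parsed, err := cache.ParseObjectName(key)
if err == nil {
	nn := parsed.AsNamespacedName() // types.NamespacedName{Namespace: "kube-system", Name: "coredns"}
	_ = nn
}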
vendor/k8s.io/client-go/tools/cache/reflector.go (507 changes, generated, vendored)

@@ -22,7 +22,9 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
+	"os"
 	"reflect"
+	"strings"
 	"sync"
 	"time"
 
@@ -40,6 +42,7 @@ import (
 	"k8s.io/client-go/tools/pager"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
+	"k8s.io/utils/pointer"
 	"k8s.io/utils/trace"
 )
 
@@ -49,12 +52,11 @@ const defaultExpectedTypeName = "<unspecified>"
 type Reflector struct {
 	// name identifies this reflector. By default it will be a file:line if possible.
 	name string
-
 	// The name of the type we expect to place in the store. The name
 	// will be the stringification of expectedGVK if provided, and the
 	// stringification of expectedType otherwise. It is for display
 	// only, and should not be used for parsing or comparison.
-	expectedTypeName string
+	typeDescription string
 	// An example object of the type we expect to place in the store.
 	// Only the type needs to be right, except that when that is
 	// `unstructured.Unstructured` the object's `"apiVersion"` and
@@ -66,17 +68,9 @@ type Reflector struct {
 	store Store
 	// listerWatcher is used to perform lists and watches.
 	listerWatcher ListerWatcher
 
 	// backoff manages backoff of ListWatch
 	backoffManager wait.BackoffManager
-	// initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
-	initConnBackoffManager wait.BackoffManager
-	// MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
-	MaxInternalErrorRetryDuration time.Duration
-
-	resyncPeriod time.Duration
-	// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
-	ShouldResync func() bool
+	resyncPeriod time.Duration
 	// clock allows tests to manipulate time
 	clock clock.Clock
 	// paginatedResult defines whether pagination should be forced for list calls.
@@ -91,6 +85,8 @@ type Reflector struct {
 	isLastSyncResourceVersionUnavailable bool
 	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
 	lastSyncResourceVersionMutex sync.RWMutex
+	// Called whenever the ListAndWatch drops the connection with an error.
+	watchErrorHandler WatchErrorHandler
 	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
 	// If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data
 	// (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0")
@@ -99,8 +95,19 @@ type Reflector struct {
 	// etcd, which is significantly less efficient and may lead to serious performance and
 	// scalability problems.
 	WatchListPageSize int64
-	// Called whenever the ListAndWatch drops the connection with an error.
-	watchErrorHandler WatchErrorHandler
+	// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
+	ShouldResync func() bool
+	// MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
+	MaxInternalErrorRetryDuration time.Duration
+	// UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server.
+	// Streaming has the primary advantage of using fewer server's resources to fetch data.
+	//
+	// The old behaviour establishes a LIST request which gets data in chunks.
+	// Paginated list is less efficient and depending on the actual size of objects
+	// might result in an increased memory consumption of the APIServer.
+	//
+	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details
+	UseWatchList bool
 }
 
 // ResourceVersionUpdater is an interface that allows store implementation to
@@ -131,13 +138,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) {
 		// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
 		// has a semantic that it returns data at least as fresh as provided RV.
 		// So first try to LIST with setting RV to resource version of last observed object.
-		klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+		klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
 	case err == io.EOF:
 		// watch closed normally
 	case err == io.ErrUnexpectedEOF:
-		klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
+		klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err)
 	default:
-		utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
+		utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err))
 	}
 }
 
@@ -155,7 +162,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
 	return indexer, reflector
 }
 
-// NewReflector creates a new Reflector object which will keep the
+// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack
+// that is outside this package. See NewReflectorWithOptions for further information.
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod})
+}
+
+// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further
+// information.
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod})
+}
+
+// ReflectorOptions configures a Reflector.
+type ReflectorOptions struct {
+	// Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line
+	// in the call stack that is outside this package.
+	Name string
+
+	// TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted
+	// using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is
+	// "<unspecified>". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields
+	// are set, the type description is the string encoding of those. Otherwise, the type description is set to the
+	// go type of expectedType.
+	TypeDescription string
+
+	// ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0
+	// (do not resync).
+	ResyncPeriod time.Duration
+
+	// Clock allows tests to control time. If unset defaults to clock.RealClock{}
+	Clock clock.Clock
+}
+
+// NewReflectorWithOptions creates a new Reflector object which will keep the
 // given store up to date with the server's contents for the given
 // resource. Reflector promises to only put things in the store that
 // have the type of expectedType, unless expectedType is nil. If
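NewReflector and NewNamedReflector keep their signatures and now delegate to the options form, so existing callers are untouched; new callers can set everything in one place. A sketch, assuming lw (a cache.ListerWatcher), store, stopCh, and an import of k8s.io/api/core/v1 as v1 already exist:

r := cache.NewReflectorWithOptions(
	lw,
	&v1.Pod{}, // example object; drives the default type description
	store,
	cache.ReflectorOptions{
		Name:         "pod-reflector", // otherwise defaults to caller file:line
		ResyncPeriod: 30 * time.Minute,
	},
)
go r.Run(stopCh)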
@@ -165,49 +205,77 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
 // "yes". This enables you to use reflectors to periodically process
 // everything as well as incrementally processing the things that
 // change.
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-	return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
-}
-
-// NewNamedReflector same as NewReflector, but with a specified name for logging
-func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-	realClock := &clock.RealClock{}
+func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector {
+	reflectorClock := options.Clock
+	if reflectorClock == nil {
+		reflectorClock = clock.RealClock{}
+	}
 	r := &Reflector{
-		name:          name,
-		listerWatcher: lw,
-		store:         store,
+		name:            options.Name,
+		resyncPeriod:    options.ResyncPeriod,
+		typeDescription: options.TypeDescription,
+		listerWatcher:   lw,
+		store:           store,
 		// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
 		// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
 		// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
-		backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-		initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-		resyncPeriod:           resyncPeriod,
-		clock:                  realClock,
-		watchErrorHandler:      WatchErrorHandler(DefaultWatchErrorHandler),
+		backoffManager:    wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+		clock:             reflectorClock,
+		watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
+		expectedType:      reflect.TypeOf(expectedType),
 	}
-	r.setExpectedType(expectedType)
+
+	if r.name == "" {
+		r.name = naming.GetNameFromCallsite(internalPackages...)
+	}
+
+	if r.typeDescription == "" {
+		r.typeDescription = getTypeDescriptionFromObject(expectedType)
+	}
+
+	if r.expectedGVK == nil {
+		r.expectedGVK = getExpectedGVKFromObject(expectedType)
+	}
+
+	if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
+		r.UseWatchList = true
+	}
+
 	return r
 }
 
-func (r *Reflector) setExpectedType(expectedType interface{}) {
-	r.expectedType = reflect.TypeOf(expectedType)
-	if r.expectedType == nil {
-		r.expectedTypeName = defaultExpectedTypeName
-		return
+func getTypeDescriptionFromObject(expectedType interface{}) string {
+	if expectedType == nil {
+		return defaultExpectedTypeName
 	}
 
-	r.expectedTypeName = r.expectedType.String()
+	reflectDescription := reflect.TypeOf(expectedType).String()
 
-	if obj, ok := expectedType.(*unstructured.Unstructured); ok {
-		// Use gvk to check that watch event objects are of the desired type.
-		gvk := obj.GroupVersionKind()
-		if gvk.Empty() {
-			klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name)
-			return
-		}
-		r.expectedGVK = &gvk
-		r.expectedTypeName = gvk.String()
+	obj, ok := expectedType.(*unstructured.Unstructured)
+	if !ok {
+		return reflectDescription
 	}
+
+	gvk := obj.GroupVersionKind()
+	if gvk.Empty() {
+		return reflectDescription
+	}
+
+	return gvk.String()
+}
+
+func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind {
+	obj, ok := expectedType.(*unstructured.Unstructured)
+	if !ok {
+		return nil
+	}
+
+	gvk := obj.GroupVersionKind()
+	if gvk.Empty() {
+		return nil
+	}
+
+	return &gvk
+}
 
 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
@@ -218,13 +286,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
 	wait.BackoffUntil(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			r.watchErrorHandler(r, err)
 		}
 	}, r.backoffManager, true, stopCh)
-	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
 }
 
 var (
@@ -254,79 +322,122 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 // and then use the resource version to watch.
 // It returns error if ListAndWatch didn't even try to initialize watch.
 func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
-	klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
+	klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name)
+	var err error
+	var w watch.Interface
+	fallbackToList := !r.UseWatchList
 
-	err := r.list(stopCh)
-	if err != nil {
-		return err
+	if r.UseWatchList {
+		w, err = r.watchList(stopCh)
+		if w == nil && err == nil {
+			// stopCh was closed
+			return nil
+		}
+		if err != nil {
+			klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err)
+			fallbackToList = true
+			// ensure that we won't accidentally pass some garbage down the watch.
+			w = nil
+		}
 	}
 
+	if fallbackToList {
+		err = r.list(stopCh)
+		if err != nil {
+			return err
+		}
+	}
+
+	klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name)
+
 	resyncerrc := make(chan error, 1)
 	cancelCh := make(chan struct{})
 	defer close(cancelCh)
-	go func() {
-		resyncCh, cleanup := r.resyncChan()
-		defer func() {
-			cleanup() // Call the last one written into cleanup
-		}()
-		for {
-			select {
-			case <-resyncCh:
-			case <-stopCh:
-				return
-			case <-cancelCh:
-				return
-			}
-			if r.ShouldResync == nil || r.ShouldResync() {
-				klog.V(4).Infof("%s: forcing resync", r.name)
-				if err := r.store.Resync(); err != nil {
-					resyncerrc <- err
-					return
-				}
-			}
-			cleanup()
-			resyncCh, cleanup = r.resyncChan()
-		}
-	}()
+	go r.startResync(stopCh, cancelCh, resyncerrc)
+	return r.watch(w, stopCh, resyncerrc)
+}
 
+// startResync periodically calls r.store.Resync() method.
+// Note that this method is blocking and should be
+// called in a separate goroutine.
+func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) {
+	resyncCh, cleanup := r.resyncChan()
+	defer func() {
+		cleanup() // Call the last one written into cleanup
+	}()
+	for {
+		select {
+		case <-resyncCh:
+		case <-stopCh:
+			return
+		case <-cancelCh:
+			return
+		}
+		if r.ShouldResync == nil || r.ShouldResync() {
+			klog.V(4).Infof("%s: forcing resync", r.name)
+			if err := r.store.Resync(); err != nil {
+				resyncerrc <- err
+				return
+			}
+		}
+		cleanup()
+		resyncCh, cleanup = r.resyncChan()
+	}
+}
+
+// watch simply starts a watch request with the server.
+func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error {
+	var err error
+	retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
+
 	for {
 		// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
 		select {
 		case <-stopCh:
+			// we can only end up here when the stopCh
+			// was closed after a successful watchlist or list request
+			if w != nil {
+				w.Stop()
+			}
 			return nil
 		default:
 		}
 
-		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
-		options := metav1.ListOptions{
-			ResourceVersion: r.LastSyncResourceVersion(),
-			// We want to avoid situations of hanging watchers. Stop any watchers that do not
-			// receive any events within the timeout window.
-			TimeoutSeconds: &timeoutSeconds,
-			// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
-			// Reflector doesn't assume bookmarks are returned at all (if the server do not support
-			// watch bookmarks, it will ignore this field).
-			AllowWatchBookmarks: true,
-		}
-
 		// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
 		start := r.clock.Now()
-		w, err := r.listerWatcher.Watch(options)
-		if err != nil {
-			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
-			// It doesn't make sense to re-list all objects because most likely we will be able to restart
-			// watch where we ended.
-			// If that's the case begin exponentially backing off and resend watch request.
-			// Do the same for "429" errors.
-			if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
-				<-r.initConnBackoffManager.Backoff().C()
-				continue
+
+		if w == nil {
+			timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+			options := metav1.ListOptions{
+				ResourceVersion: r.LastSyncResourceVersion(),
+				// We want to avoid situations of hanging watchers. Stop any watchers that do not
+				// receive any events within the timeout window.
+				TimeoutSeconds: &timeoutSeconds,
+				// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+				// Reflector doesn't assume bookmarks are returned at all (if the server do not support
+				// watch bookmarks, it will ignore this field).
+				AllowWatchBookmarks: true,
+			}
+
+			w, err = r.listerWatcher.Watch(options)
+			if err != nil {
+				if canRetry := isWatchErrorRetriable(err); canRetry {
+					klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err)
+					select {
+					case <-stopCh:
+						return nil
+					case <-r.backoffManager.Backoff().C():
+						continue
+					}
+				}
+				return err
 			}
-			return err
 		}
 
-		err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
+		err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh)
+		// Ensure that watch will not be reused across iterations.
+		w.Stop()
+		w = nil
 		retry.After(err)
 		if err != nil {
 			if err != errorStopRequested {
@@ -335,16 +446,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 				// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
 				// has a semantic that it returns data at least as fresh as provided RV.
 				// So first try to LIST with setting RV to resource version of last observed object.
-				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
 			case apierrors.IsTooManyRequests(err):
-				klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
-				<-r.initConnBackoffManager.Backoff().C()
-				continue
+				klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription)
+				select {
+				case <-stopCh:
+					return nil
+				case <-r.backoffManager.Backoff().C():
+					continue
+				}
 			case apierrors.IsInternalError(err) && retry.ShouldRetry():
-				klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
+				klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err)
 				continue
 			default:
-				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
+				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err)
 			}
 		}
 		return nil
@@ -399,7 +514,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 			pager.PageSize = 0
 		}
 
-		list, paginatedResult, err = pager.List(context.Background(), options)
+		list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options)
 		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
 			r.setIsLastSyncResourceVersionUnavailable(true)
 			// Retry immediately if the resource version used to list is unavailable.
@@ -408,7 +523,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 			// resource version it is listing at is expired or the cache may not yet be synced to the provided
 			// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
 			// the reflector makes forward progress.
-			list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+			list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
 		}
 		close(listCh)
 	}()
@@ -421,8 +536,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 	}
 	initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
 	if err != nil {
-		klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
-		return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
+		klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err)
+		return fmt.Errorf("failed to list %v: %w", r.typeDescription, err)
 	}
 
 	// We check if the list was paginated and if so set the paginatedResult based on that.
@@ -446,7 +561,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 	}
 	resourceVersion = listMetaInterface.GetResourceVersion()
 	initTrace.Step("Resource version extracted")
-	items, err := meta.ExtractList(list)
+	items, err := meta.ExtractListWithAlloc(list)
 	if err != nil {
 		return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
 	}
@@ -460,6 +575,120 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 	return nil
 }
 
+// watchList establishes a stream to get a consistent snapshot of data
+// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
+//
+// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a consistent stream with the server.
+// That means the returned data is consistent, as if, served directly from etcd via a quorum read.
+// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion.
+// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+//
+// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a stream with the server at the provided resource version.
+// To establish the initial state the server begins with synthetic "Added" events.
+// It ends with a synthetic "Bookmark" event containing the provided or newer resource version.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
+	var w watch.Interface
+	var err error
+	var temporaryStore Store
+	var resourceVersion string
+	// TODO(#115478): see if this function could be turned
+	// into a method and see if error handling
+	// could be unified with the r.watch method
+	isErrorRetriableWithSideEffectsFn := func(err error) bool {
+		if canRetry := isWatchErrorRetriable(err); canRetry {
+			klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err)
+			<-r.backoffManager.Backoff().C()
+			return true
+		}
+		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+			// we tried to re-establish a watch request but the provided RV
+			// has either expired or it is greater than the server knows about.
+			// In that case we reset the RV and
+			// try to get a consistent snapshot from the watch cache (case 1)
+			r.setIsLastSyncResourceVersionUnavailable(true)
+			return true
+		}
+		return false
+	}
+
+	initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name})
+	defer initTrace.LogIfLong(10 * time.Second)
+	for {
+		select {
+		case <-stopCh:
+			return nil, nil
+		default:
+		}
+
+		resourceVersion = ""
+		lastKnownRV := r.rewatchResourceVersion()
+		temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+		// TODO(#115478): large "list", slow clients, slow network, p&f
+		// might slow down streaming and eventually fail.
+		// maybe in such a case we should retry with an increased timeout?
+		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+		options := metav1.ListOptions{
+			ResourceVersion:      lastKnownRV,
+			AllowWatchBookmarks:  true,
+			SendInitialEvents:    pointer.Bool(true),
+			ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
+			TimeoutSeconds:       &timeoutSeconds,
+		}
+		start := r.clock.Now()
+
+		w, err = r.listerWatcher.Watch(options)
+		if err != nil {
+			if isErrorRetriableWithSideEffectsFn(err) {
+				continue
+			}
+			return nil, err
+		}
+		bookmarkReceived := pointer.Bool(false)
+		err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription,
+			func(rv string) { resourceVersion = rv },
+			bookmarkReceived,
+			r.clock, make(chan error), stopCh)
+		if err != nil {
+			w.Stop() // stop and retry with clean state
+			if err == errorStopRequested {
+				return nil, nil
+			}
+			if isErrorRetriableWithSideEffectsFn(err) {
+				continue
+			}
+			return nil, err
+		}
+		if *bookmarkReceived {
+			break
+		}
+	}
+	// We successfully got initial state from watch-list confirmed by the
+	// "k8s.io/initial-events-end" bookmark.
+	initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())})
+	r.setIsLastSyncResourceVersionUnavailable(false)
+
+	// we utilize the temporaryStore to ensure independence from the current store implementation.
+	// as of today, the store is implemented as a queue and will be drained by the higher-level
+	// component as soon as it finishes replacing the content.
+	checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore)
+
+	if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
+		return nil, fmt.Errorf("unable to sync watch-list result: %v", err)
+	}
+	initTrace.Step("SyncWith done")
+	r.setLastSyncResourceVersion(resourceVersion)
+
+	return w, nil
+}
+
 // syncWith replaces the store's items with the given list.
 func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
 	found := make([]interface{}, 0, len(items))
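The streaming path added above can be enabled per reflector or process-wide; either way ListAndWatch falls back to the standard LIST/WATCH when the watch-list request fails, since making progress beats deadlocking. A sketch of both opt-ins (the feature is alpha in this vendored version):

// Per-reflector, on a reflector built as in the earlier sketch:
r.UseWatchList = true

// Or process-wide: the constructor enables it for every reflector when
// ENABLE_CLIENT_GO_WATCH_LIST_ALPHA is set to any non-empty value, e.g.
//
//	ENABLE_CLIENT_GO_WATCH_LIST_ALPHA=true ./my-controller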
@@ -478,15 +707,17 @@ func watchHandler(start time.Time,
 	name string,
 	expectedTypeName string,
 	setLastSyncResourceVersion func(string),
+	exitOnInitialEventsEndBookmark *bool,
 	clock clock.Clock,
 	errc chan error,
 	stopCh <-chan struct{},
 ) error {
 	eventCount := 0
 
 	// Stopping the watcher should be idempotent and if we return from this function there's no way
 	// we're coming back in with the same watch interface.
 	defer w.Stop()
+	if exitOnInitialEventsEndBookmark != nil {
+		// set it to false just in case somebody
+		// made it positive
+		*exitOnInitialEventsEndBookmark = false
+	}
 
 loop:
 	for {
@@ -541,6 +772,11 @@ loop:
 			}
 		case watch.Bookmark:
 			// A `Bookmark` means watch has synced here, just update the resourceVersion
+			if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" {
+				if exitOnInitialEventsEndBookmark != nil {
+					*exitOnInitialEventsEndBookmark = true
+				}
+			}
 		default:
 			utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
 		}
@@ -549,6 +785,11 @@ loop:
 			rvu.UpdateResourceVersion(resourceVersion)
 		}
 		eventCount++
+		if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark {
+			watchDuration := clock.Since(start)
+			klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration)
+			return nil
+		}
 	}
 }
 
@@ -597,6 +838,18 @@ func (r *Reflector) relistResourceVersion() string {
 	return r.lastSyncResourceVersion
 }
 
+// rewatchResourceVersion determines the resource version the reflector should start streaming from.
+func (r *Reflector) rewatchResourceVersion() string {
+	r.lastSyncResourceVersionMutex.RLock()
+	defer r.lastSyncResourceVersionMutex.RUnlock()
+	if r.isLastSyncResourceVersionUnavailable {
+		// initial stream should return data at the most recent resource version.
+		// the returned data must be consistent i.e. as if served from etcd via a quorum read
+		return ""
+	}
+	return r.lastSyncResourceVersion
+}
+
 // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
 // "expired" or "too large resource version" error.
 func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
@@ -635,5 +888,25 @@ func isTooLargeResourceVersionError(err error) bool {
 			return true
 		}
 	}
+
+	// Matches the message returned by api server before 1.17.0
+	if strings.Contains(apierr.Status().Message, "Too large resource version") {
+		return true
+	}
+
 	return false
 }
+
+// isWatchErrorRetriable determines if it is safe to retry
+// a watch error retrieved from the server.
+func isWatchErrorRetriable(err error) bool {
+	// If this is "connection refused" error, it means that most likely apiserver is not responsive.
+	// It doesn't make sense to re-list all objects because most likely we will be able to restart
+	// watch where we ended.
+	// If that's the case begin exponentially backing off and resend watch request.
+	// Do the same for "429" errors.
+	if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
+		return true
+	}
+	return false
+}
vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go (new file, 119 lines, generated, vendored)

@@ -0,0 +1,119 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"context"
	"os"
	"sort"
	"strconv"
	"time"

	"github.com/google/go-cmp/cmp"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

var dataConsistencyDetectionEnabled = false

func init() {
	dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
}

// checkWatchListConsistencyIfRequested performs a data consistency check only when
// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
//
// The consistency check is meant to be enforced only in the CI, not in production.
// The check ensures that data retrieved by the watch-list api call
// is exactly the same as data received by the standard list api call.
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
	if !dataConsistencyDetectionEnabled {
		return
	}
	checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store)
}

// checkWatchListConsistency exists solely for testing purposes.
// we cannot use checkWatchListConsistencyIfRequested because
// it is guarded by an environmental variable.
// we cannot manipulate the environmental variable because
// it will affect other tests in this package.
func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
	klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity)
	opts := metav1.ListOptions{
		ResourceVersion:      lastSyncedResourceVersion,
		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
	}
	var list runtime.Object
	err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) {
		list, err = listerWatcher.List(opts)
		if err != nil {
			// the consistency check will only be enabled in the CI
			// and LIST calls in general will be retired by the client-go library
			// if we fail simply log and retry
			klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
rawListItems, err := meta.ExtractListWithAlloc(list)
|
||||
if err != nil {
|
||||
panic(err) // this should never happen
|
||||
}
|
||||
|
||||
listItems := toMetaObjectSliceOrDie(rawListItems)
|
||||
storeItems := toMetaObjectSliceOrDie(store.List())
|
||||
|
||||
sort.Sort(byUID(listItems))
|
||||
sort.Sort(byUID(storeItems))
|
||||
|
||||
if !cmp.Equal(listItems, storeItems) {
|
||||
klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems))
|
||||
msg := "data inconsistency detected for the watch-list feature, panicking!"
|
||||
panic(msg)
|
||||
}
|
||||
}
|
||||
|
||||
type byUID []metav1.Object
|
||||
|
||||
func (a byUID) Len() int { return len(a) }
|
||||
func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
|
||||
func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
|
||||
result := make([]metav1.Object, len(s))
|
||||
for i, v := range s {
|
||||
m, err := meta.Accessor(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
result[i] = m
|
||||
}
|
||||
return result
|
||||
}
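The detector is armed once, at process start, via the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable read in init(); it is intended for CI, not production. The heart of the check, restated as a standalone sketch (the function name is illustrative, not part of client-go's API):

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// compareSnapshots mirrors the check above: order both snapshots by UID,
// then deep-compare; any diff means the watch-list stream and a plain
// LIST disagree about cluster state.
func compareSnapshots(listItems, storeItems []metav1.Object) error {
	byUID := func(s []metav1.Object) func(i, j int) bool {
		return func(i, j int) bool { return s[i].GetUID() < s[j].GetUID() }
	}
	sort.Slice(listItems, byUID(listItems))
	sort.Slice(storeItems, byUID(storeItems))
	if !cmp.Equal(listItems, storeItems) {
		return fmt.Errorf("watch-list inconsistency: %s", cmp.Diff(listItems, storeItems))
	}
	return nil
}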
170
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache/synctrack"
"k8s.io/utils/buffer"
"k8s.io/utils/clock"

@@ -132,11 +133,13 @@ import (
// state, except that its ResourceVersion is replaced with a
// ResourceVersion in which the object is actually absent.
type SharedInformer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
// It returns a registration handle for the handler that can be used to remove
// the handler again.
// AddEventHandler adds an event handler to the shared informer using
// the shared informer's resync period. Events to a single handler are
// delivered sequentially, but there is no coordination between
// different handlers.
// It returns a registration handle for the handler that can be used to
// remove the handler again, or to tell if the handler is synced (has
// seen every item in the initial list).
AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
// AddEventHandlerWithResyncPeriod adds an event handler to the
// shared informer with the requested resync period; zero means
@@ -169,6 +172,10 @@ type SharedInformer interface {
// HasSynced returns true if the shared informer's store has been
// informed by at least one full LIST of the authoritative state
// of the informer's object collection. This is unrelated to "resync".
//
// Note that this doesn't tell you if an individual handler is synced!!
// For that, please call HasSynced on the handle returned by
// AddEventHandler.
HasSynced() bool
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
// store. The value returned is not synchronized with access to the underlying store and is not
@@ -198,10 +205,7 @@ type SharedInformer interface {
//
// Must be set before starting the informer.
//
// Note: Since the object given to the handler may be already shared with
// other goroutines, it is advisable to copy the object being
// transform before mutating it at all and returning the copy to prevent
// data races.
// Please see the comment on TransformFunc for more details.
SetTransform(handler TransformFunc) error

// IsStopped reports whether the informer has already been stopped.
@@ -213,7 +217,14 @@ type SharedInformer interface {
// Opaque interface representing the registration of ResourceEventHandler for
// a SharedInformer. Must be supplied back to the same SharedInformer's
// `RemoveEventHandler` to unregister the handlers.
type ResourceEventHandlerRegistration interface{}
//
// Also used to tell if the handler is synced (has had all items in the initial
// list delivered).
type ResourceEventHandlerRegistration interface {
// HasSynced reports if both the parent has synced and all pre-sync
// events have been delivered.
HasSynced() bool
}
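With the registration handle gaining HasSynced, callers can wait for a specific handler, rather than just the informer's store, to have processed the initial list. A sketch of the intended usage; the informer setup and handler body are assumptions:

// waitForHandler registers a handler and blocks until it has seen the
// whole initial list; reg.HasSynced becomes true only after the store has
// synced AND this handler has been called for every initial item.
func waitForHandler(informer cache.SharedIndexInformer, stopCh <-chan struct{}) error {
	reg, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* enqueue obj */ },
	})
	if err != nil {
		return err
	}
	go informer.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, reg.HasSynced) {
		return fmt.Errorf("event handler never synced")
	}
	return nil
}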

// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
type SharedIndexInformer interface {
@@ -223,14 +234,26 @@ type SharedIndexInformer interface {
GetIndexer() Indexer
}

// NewSharedInformer creates a new instance for the listwatcher.
// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details.
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
}

// NewSharedIndexInformer creates a new instance for the listwatcher.
// The created informer will not do resyncs if the given
// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See
// NewSharedIndexInformerWithOptions for full details.
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
return NewSharedIndexInformerWithOptions(
lw,
exampleObject,
SharedIndexInformerOptions{
ResyncPeriod: defaultEventHandlerResyncPeriod,
Indexers: indexers,
},
)
}

// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher.
// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each
// handler with a non-zero requested resync period, whether added
// before or after the informer starts, the nominal resync period is
// the requested resync period rounded up to a multiple of the
@@ -238,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv
// checking period is established when the informer starts running,
// and is the maximum of (a) the minimum of the resync periods
// requested before the informer starts and the
// defaultEventHandlerResyncPeriod given here and (b) the constant
// options.ResyncPeriod given here and (b) the constant
// `minimumResyncPeriod` defined in this file.
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer {
realClock := &clock.RealClock{}
sharedIndexInformer := &sharedIndexInformer{

return &sharedIndexInformer{
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers),
processor: &sharedProcessor{clock: realClock},
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
listerWatcher: lw,
objectType: exampleObject,
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
objectDescription: options.ObjectDescription,
resyncCheckPeriod: options.ResyncPeriod,
defaultEventHandlerResyncPeriod: options.ResyncPeriod,
clock: realClock,
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
}
return sharedIndexInformer
}

// SharedIndexInformerOptions configures a sharedIndexInformer.
type SharedIndexInformerOptions struct {
// ResyncPeriod is the default event handler resync period and resync check
// period. If unset/unspecified, these are defaulted to 0 (do not resync).
ResyncPeriod time.Duration

// Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured.
Indexers Indexers

// ObjectDescription is the sharedIndexInformer's object description. This is passed through to the
// underlying Reflector's type description.
ObjectDescription string
}
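A sketch of the new options-based constructor; the clientset plumbing and the chosen values are assumptions, not part of this diff:

// newPodInformer builds a cluster-wide pod informer with explicit options.
func newPodInformer(client kubernetes.Interface) cache.SharedIndexInformer {
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	return cache.NewSharedIndexInformerWithOptions(
		lw,
		&corev1.Pod{},
		cache.SharedIndexInformerOptions{
			ResyncPeriod:      30 * time.Minute, // 0 disables resync
			Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
			ObjectDescription: "pods watched by example-controller", // surfaces in Reflector logs
		},
	)
}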

// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
@@ -296,11 +334,9 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
},
stopCh)
if err != nil {
klog.V(2).Infof("stop requested")
return false
}

klog.V(4).Infof("caches populated")
return true
}

@@ -326,12 +362,13 @@ type sharedIndexInformer struct {

listerWatcher ListerWatcher

// objectType is an example object of the type this informer is
// expected to handle. Only the type needs to be right, except
// that when that is `unstructured.Unstructured` the object's
// `"apiVersion"` and `"kind"` must also be right.
// objectType is an example object of the type this informer is expected to handle. If set, an event
// with an object with a mismatching type is dropped instead of being delivered to listeners.
objectType runtime.Object

// objectDescription is the description of this informer's objects. This typically defaults to
objectDescription string

// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
// shouldResync to check if any of our listeners need a resync.
resyncCheckPeriod time.Duration
@@ -381,7 +418,8 @@ type updateNotification struct {
}

type addNotification struct {
newObj interface{}
newObj interface{}
isInInitialList bool
}

type deleteNotification struct {
@@ -419,27 +457,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
return
}
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KnownObjects: s.indexer,
EmitDeltaTypeReplaced: true,
})

cfg := &Config{
Queue: fifo,
ListerWatcher: s.listerWatcher,
ObjectType: s.objectType,
FullResyncPeriod: s.resyncCheckPeriod,
RetryOnError: false,
ShouldResync: s.processor.shouldResync,

Process: s.HandleDeltas,
WatchErrorHandler: s.watchErrorHandler,
}

func() {
s.startedLock.Lock()
defer s.startedLock.Unlock()

fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KnownObjects: s.indexer,
EmitDeltaTypeReplaced: true,
Transformer: s.transform,
})

cfg := &Config{
Queue: fifo,
ListerWatcher: s.listerWatcher,
ObjectType: s.objectType,
ObjectDescription: s.objectDescription,
FullResyncPeriod: s.resyncCheckPeriod,
RetryOnError: false,
ShouldResync: s.processor.shouldResync,

Process: s.HandleDeltas,
WatchErrorHandler: s.watchErrorHandler,
}

s.controller = New(cfg)
s.controller.(*controller).clock = s.clock
s.started = true
@@ -559,7 +600,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
}
}

listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced)

if !s.started {
return s.processor.addListener(listener), nil
@@ -575,27 +616,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv

handle := s.processor.addListener(listener)
for _, item := range s.indexer.List() {
listener.add(addNotification{newObj: item})
// Note that we enqueue these notifications with the lock held
// and before returning the handle. That means there is never a
// chance for anyone to call the handle's HasSynced method in a
// state when it would falsely return true (i.e., when the
// shared informer is synced but it has not observed an Add
// with isInitialList being true, nor when the thread
// processing notifications somehow goes faster than this
// thread adding them and the counter is temporarily zero).
listener.add(addNotification{newObj: item, isInInitialList: true})
}
return handle, nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error {
s.blockDeltas.Lock()
defer s.blockDeltas.Unlock()

if deltas, ok := obj.(Deltas); ok {
return processDeltas(s, s.indexer, s.transform, deltas)
return processDeltas(s, s.indexer, deltas, isInInitialList)
}
return errors.New("object given as Process argument is not Deltas")
}

// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnAdd(obj interface{}) {
func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) {
// Invocation of this function is locked under s.blockDeltas, so it is
// safe to distribute the notification
s.cacheMutationDetector.AddObject(obj)
s.processor.distribute(addNotification{newObj: obj}, false)
s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false)
}

// Conforms to ResourceEventHandler
@@ -817,6 +866,8 @@ type processorListener struct {

handler ResourceEventHandler

syncTracker *synctrack.SingleFileTracker

// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
// added until we OOM.
@@ -847,11 +898,18 @@ type processorListener struct {
resyncLock sync.Mutex
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
// HasSynced returns true if the source informer has synced, and all
// corresponding events have been delivered.
func (p *processorListener) HasSynced() bool {
return p.syncTracker.HasSynced()
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener {
ret := &processorListener{
nextCh: make(chan interface{}),
addCh: make(chan interface{}),
handler: handler,
syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced},
pendingNotifications: *buffer.NewRingGrowing(bufferSize),
requestedResyncPeriod: requestedResyncPeriod,
resyncPeriod: resyncPeriod,
@@ -863,6 +921,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res
}

func (p *processorListener) add(notification interface{}) {
if a, ok := notification.(addNotification); ok && a.isInInitialList {
p.syncTracker.Start()
}
p.addCh <- notification
}

@@ -908,7 +969,10 @@ func (p *processorListener) run() {
case updateNotification:
p.handler.OnUpdate(notification.oldObj, notification.newObj)
case addNotification:
p.handler.OnAdd(notification.newObj)
p.handler.OnAdd(notification.newObj, notification.isInInitialList)
if notification.isInInitialList {
p.syncTracker.Finished()
}
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:

31
vendor/k8s.io/client-go/tools/cache/store.go
generated
vendored
@@ -21,6 +21,7 @@ import (
"strings"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Store is a generic object storage and processing interface. A
@@ -99,20 +100,38 @@ type ExplicitKey string
// The key uses the format <namespace>/<name> unless <namespace> is empty, then
// it's just <name>.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
// Clients that want a structured alternative can use ObjectToName or MetaObjectToName.
// Note: this would not be a client that wants a key for a Store because those are
// necessarily strings.
//
// TODO maybe some day?: change Store to be keyed differently
func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
if key, ok := obj.(ExplicitKey); ok {
return string(key), nil
}
objName, err := ObjectToName(obj)
if err != nil {
return "", err
}
return objName.String(), nil
}

// ObjectToName returns the structured name for the given object,
// if indeed it can be viewed as a metav1.Object.
func ObjectToName(obj interface{}) (ObjectName, error) {
meta, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("object has no meta: %v", err)
return ObjectName{}, fmt.Errorf("object has no meta: %v", err)
}
if len(meta.GetNamespace()) > 0 {
return meta.GetNamespace() + "/" + meta.GetName(), nil
return MetaObjectToName(meta), nil
}

// MetaObjectToName returns the structured name for the given object
func MetaObjectToName(obj metav1.Object) ObjectName {
if len(obj.GetNamespace()) > 0 {
return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
}
return meta.GetName(), nil
return ObjectName{Namespace: "", Name: obj.GetName()}
}
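The string key and the new structured ObjectName are two views of the same identity; ObjectName.String() reproduces the <namespace>/<name> packing. A small sketch (the object is an assumed example value):

obj := &metav1.ObjectMeta{Namespace: "kube-system", Name: "coredns"}
name := cache.MetaObjectToName(obj)       // ObjectName{Namespace: "kube-system", Name: "coredns"}
key, _ := cache.MetaNamespaceKeyFunc(obj) // "kube-system/coredns"
fmt.Println(name.String() == key)         // true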

// SplitMetaNamespaceKey returns the namespace and name that

83
vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package synctrack

import (
"sync"
"sync/atomic"
)

// Lazy defers the computation of `Evaluate` to when it is necessary. It is
// possible that Evaluate will be called in parallel from multiple goroutines.
type Lazy[T any] struct {
Evaluate func() (T, error)

cache atomic.Pointer[cacheEntry[T]]
}

type cacheEntry[T any] struct {
eval func() (T, error)
lock sync.RWMutex
result *T
}

func (e *cacheEntry[T]) get() (T, error) {
if cur := func() *T {
e.lock.RLock()
defer e.lock.RUnlock()
return e.result
}(); cur != nil {
return *cur, nil
}

e.lock.Lock()
defer e.lock.Unlock()
if e.result != nil {
return *e.result, nil
}
r, err := e.eval()
if err == nil {
e.result = &r
}
return r, err
}

func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] {
return &cacheEntry[T]{eval: z.Evaluate}
}

// Notify should be called when something has changed necessitating a new call
// to Evaluate.
func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) }

// Get should be called to get the current result of a call to Evaluate. If the
// current cached value is stale (due to a call to Notify), then Evaluate will
// be called synchronously. If subsequent calls to Get happen (without another
// Notify), they will all wait for the same return value.
//
// Error returns are not cached and will cause multiple calls to evaluate!
func (z *Lazy[T]) Get() (T, error) {
e := z.cache.Load()
if e == nil {
// Since we don't force a constructor, nil is a possible value.
// If multiple Gets race to set this, the swap makes sure only
// one wins.
z.cache.CompareAndSwap(nil, z.newCacheEntry())
e = z.cache.Load()
}
return e.get()
}
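A usage sketch for Lazy: Get serves the cached result until Notify marks it stale, and a failed Evaluate is retried on the next Get. (synctrack is an internal helper of the cache package; the sketch only illustrates the semantics.)

var calls int
lz := synctrack.Lazy[string]{
	Evaluate: func() (string, error) {
		calls++
		return fmt.Sprintf("evaluated %d time(s)", calls), nil
	},
}
v, _ := lz.Get() // evaluates: "evaluated 1 time(s)"
v, _ = lz.Get()  // served from cache, still "evaluated 1 time(s)"
lz.Notify()      // something changed; the cached value is now stale
v, _ = lz.Get()  // re-evaluates: "evaluated 2 time(s)"
fmt.Println(v)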
120
vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package synctrack contains utilities for helping controllers track whether
// they are "synced" or not, that is, whether they have processed all items
// from the informer's initial list.
package synctrack

import (
"sync"
"sync/atomic"

"k8s.io/apimachinery/pkg/util/sets"
)

// AsyncTracker helps propagate HasSynced in the face of multiple worker threads.
type AsyncTracker[T comparable] struct {
UpstreamHasSynced func() bool

lock sync.Mutex
waiting sets.Set[T]
}

// Start should be called prior to processing each key which is part of the
// initial list.
func (t *AsyncTracker[T]) Start(key T) {
t.lock.Lock()
defer t.lock.Unlock()
if t.waiting == nil {
t.waiting = sets.New[T](key)
} else {
t.waiting.Insert(key)
}
}

// Finished should be called when finished processing a key which was part of
// the initial list. Since keys are tracked individually, nothing bad happens
// if you call Finished without a corresponding call to Start. This makes it
// easier to use this in combination with e.g. queues which don't make it easy
// to plumb through the isInInitialList boolean.
func (t *AsyncTracker[T]) Finished(key T) {
t.lock.Lock()
defer t.lock.Unlock()
if t.waiting != nil {
t.waiting.Delete(key)
}
}

// HasSynced returns true if the source is synced and every key present in the
// initial list has been processed. This relies on the source not considering
// itself synced until *after* it has delivered the notification for the last
// key, and that notification handler must have called Start.
func (t *AsyncTracker[T]) HasSynced() bool {
// Call UpstreamHasSynced first: it might take a lock, which might take
// a significant amount of time, and we can't hold our lock while
// waiting on that or a user is likely to get a deadlock.
if !t.UpstreamHasSynced() {
return false
}
t.lock.Lock()
defer t.lock.Unlock()
return t.waiting.Len() == 0
}
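A sketch of AsyncTracker with parallel workers; the upstream and work callbacks are assumptions. Start is recorded before hand-off, so HasSynced cannot flip to true while a key is still in flight:

// trackInitialKeys fans initial-list keys out to workers while keeping
// HasSynced accurate.
func trackInitialKeys(keys []string, upstream func() bool, work func(string)) *synctrack.AsyncTracker[string] {
	t := &synctrack.AsyncTracker[string]{UpstreamHasSynced: upstream}
	for _, k := range keys {
		t.Start(k) // register before the worker takes over
		go func(k string) {
			work(k)
			t.Finished(k) // key no longer outstanding
		}(k)
	}
	return t // t.HasSynced(): upstream synced AND no keys outstanding
}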

// SingleFileTracker helps propagate HasSynced when events are processed in
// order (i.e. via a queue).
type SingleFileTracker struct {
// Important: count is used with atomic operations so it must be 64-bit
// aligned, otherwise atomic operations will panic. Having it at the top of
// the struct will guarantee that, even on 32-bit arches.
// See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information.
count int64

UpstreamHasSynced func() bool
}

// Start should be called prior to processing each key which is part of the
// initial list.
func (t *SingleFileTracker) Start() {
atomic.AddInt64(&t.count, 1)
}

// Finished should be called when finished processing a key which was part of
// the initial list. You must never call Finished() before (or without) its
// corresponding Start(), that is a logic error that could cause HasSynced to
// return a wrong value. To help you notice this should it happen, Finished()
// will panic if the internal counter goes negative.
func (t *SingleFileTracker) Finished() {
result := atomic.AddInt64(&t.count, -1)
if result < 0 {
panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value")
}
}

// HasSynced returns true if the source is synced and every key present in the
// initial list has been processed. This relies on the source not considering
// itself synced until *after* it has delivered the notification for the last
// key, and that notification handler must have called Start.
func (t *SingleFileTracker) HasSynced() bool {
// Call UpstreamHasSynced first: it might take a lock, which might take
// a significant amount of time, and we don't want to then act on a
// stale count value.
if !t.UpstreamHasSynced() {
return false
}
return atomic.LoadInt64(&t.count) <= 0
}
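SingleFileTracker is the ordered counterpart used by processorListener earlier in this diff: Start when an initial-list notification is enqueued, Finished once the handler returns. A sketch with assumed callbacks:

// newOrderedHandler wraps a processing function so that initial-list items
// are balanced with Start/Finished exactly once, in order.
func newOrderedHandler(upstream func() bool, process func(interface{})) (func(obj interface{}, initial bool), func() bool) {
	t := &synctrack.SingleFileTracker{UpstreamHasSynced: upstream}
	handle := func(obj interface{}, initial bool) {
		if initial {
			t.Start()
			defer t.Finished() // runs after process returns; never before its Start
		}
		process(obj)
	}
	return handle, t.HasSynced
}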
14
vendor/k8s.io/client-go/tools/clientcmd/api/types.go
generated
vendored
@@ -67,7 +67,7 @@ type Preferences struct {
type Cluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
LocationOfOrigin string
LocationOfOrigin string `json:"-"`
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `json:"server"`
// TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
@@ -107,7 +107,7 @@ type Cluster struct {
type AuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
LocationOfOrigin string
LocationOfOrigin string `json:"-"`
// ClientCertificate is the path to a client cert file for TLS.
// +optional
ClientCertificate string `json:"client-certificate,omitempty"`
@@ -159,7 +159,7 @@ type AuthInfo struct {
type Context struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
LocationOfOrigin string
LocationOfOrigin string `json:"-"`
// Cluster is the name of the cluster for this context
Cluster string `json:"cluster"`
// AuthInfo is the name of the authInfo for this context
@@ -252,7 +252,7 @@ type ExecConfig struct {
// recommended as one of the prime benefits of exec plugins is that no secrets need
// to be stored directly in the kubeconfig.
// +k8s:conversion-gen=false
Config runtime.Object
Config runtime.Object `json:"-"`

// InteractiveMode determines this plugin's relationship with standard input. Valid
// values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
@@ -264,7 +264,7 @@ type ExecConfig struct {
// client.authentication.k8s.io/v1beta1, then this field is optional and defaults
// to "IfAvailable" when unset. Otherwise, this field is required.
// +optional
InteractiveMode ExecInteractiveMode
InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`

// StdinUnavailable indicates whether the exec authenticator can pass standard
// input through to this exec plugin. For example, a higher level entity might be using
@@ -272,14 +272,14 @@ type ExecConfig struct {
// plugin to use standard input. This is kept here in order to keep all of the exec configuration
// together, but it is never serialized.
// +k8s:conversion-gen=false
StdinUnavailable bool
StdinUnavailable bool `json:"-"`

// StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
// cannot successfully run this exec plugin because it needs to use standard input and
// StdinUnavailable is true. For example, a process that is already using standard input to
// read user instructions might set this to "used by my-program to read user instructions".
// +k8s:conversion-gen=false
StdinUnavailableMessage string
StdinUnavailableMessage string `json:"-"`
}

var _ fmt.Stringer = new(ExecConfig)

24
vendor/k8s.io/client-go/tools/clientcmd/loader.go
generated
vendored
@@ -128,6 +128,28 @@ type ClientConfigLoadingRules struct {
// WarnIfAllMissing indicates whether the configuration files pointed to by the KUBECONFIG environment variable are present.
// In case of missing files, it warns the user about the missing files.
WarnIfAllMissing bool

// Warner is the warning log callback to use in case of missing files.
Warner WarningHandler
}

// WarningHandler allows setting the logging function to use
type WarningHandler func(error)

func (handler WarningHandler) Warn(err error) {
if handler == nil {
klog.V(1).Info(err)
} else {
handler(err)
}
}

type MissingConfigError struct {
Missing []string
}

func (c MissingConfigError) Error() string {
return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", "))
}
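A sketch of the new hook: route missing-kubeconfig warnings to a custom handler instead of klog, and recover the file list from the typed error. The logging choice is an assumption:

rules := clientcmd.NewDefaultClientConfigLoadingRules()
rules.WarnIfAllMissing = true
rules.Warner = func(err error) {
	var missing clientcmd.MissingConfigError
	if errors.As(err, &missing) {
		log.Printf("kubeconfig files not found: %v", missing.Missing)
	}
}
config, err := rules.Load() // Warner fires only when every candidate file is absent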

// ClientConfigLoadingRules implements the ClientConfigLoader interface.
@@ -219,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
}

if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 {
klog.Warningf("Config not found: %s", strings.Join(missingList, ", "))
rules.Warner.Warn(MissingConfigError{Missing: missingList})
}

// first merge all of our maps

4
vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
generated
vendored
@@ -49,12 +49,12 @@ type InClusterConfig interface {
Possible() bool
}

// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name
// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name
func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}}
}

// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader
// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader
func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader}
}
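Typical construction of the deferred-loading config these helpers return; the context override is an assumed example value:

loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
overrides := &clientcmd.ConfigOverrides{CurrentContext: "dev"}
cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
restConfig, err := cc.ClientConfig() // kubeconfig is read lazily, falling back to in-cluster config when appropriate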

153
vendor/k8s.io/client-go/tools/events/event_broadcaster.go
generated
vendored
@@ -56,9 +56,11 @@ var defaultSleepDuration = 10 * time.Second

// TODO: validate impact of copying and investigate hashing
type eventKey struct {
eventType string
action string
reason string
reportingController string
reportingInstance string
regarding corev1.ObjectReference
related corev1.ObjectReference
}
@@ -79,27 +81,27 @@ type EventSinkImpl struct {
}

// Create takes the representation of an event and creates it. Returns the server's representation of the event, and an error, if there is any.
func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) {
func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
if event.Namespace == "" {
return nil, fmt.Errorf("can't create an event with empty namespace")
}
return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{})
return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{})
}

// Update takes the representation of an event and updates it. Returns the server's representation of the event, and an error, if there is any.
func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) {
func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
if event.Namespace == "" {
return nil, fmt.Errorf("can't update an event with empty namespace")
}
return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{})
return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{})
}

// Patch applies the patch and returns the patched event, and an error, if there is any.
func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
if event.Namespace == "" {
return nil, fmt.Errorf("can't patch an event with empty namespace")
}
return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
}

// NewBroadcaster creates a new event broadcaster.
@@ -122,13 +124,13 @@ func (e *eventBroadcasterImpl) Shutdown() {
}

// refreshExistingEventSeries refreshes events TTL
func (e *eventBroadcasterImpl) refreshExistingEventSeries() {
func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) {
// TODO: Investigate whether lock contention won't be a problem
e.mu.Lock()
defer e.mu.Unlock()
for isomorphicKey, event := range e.eventCache {
if event.Series != nil {
if recordedEvent, retry := recordEvent(e.sink, event); !retry {
if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry {
if recordedEvent != nil {
e.eventCache[isomorphicKey] = recordedEvent
}
@@ -140,7 +142,7 @@ func (e *eventBroadcasterImpl) refreshExistingEventSeries() {
// finishSeries checks if a series has ended and either:
// - write final count to the apiserver
// - delete a singleton event (i.e. series field is nil) from the cache
func (e *eventBroadcasterImpl) finishSeries() {
func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) {
// TODO: Investigate whether lock contention won't be a problem
e.mu.Lock()
defer e.mu.Unlock()
@@ -148,7 +150,7 @@ func (e *eventBroadcasterImpl) finishSeries() {
eventSerie := event.Series
if eventSerie != nil {
if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) {
if _, retry := recordEvent(e.sink, event); !retry {
if _, retry := recordEvent(ctx, e.sink, event); !retry {
delete(e.eventCache, isomorphicKey)
}
}
@@ -159,13 +161,13 @@ func (e *eventBroadcasterImpl) finishSeries() {
}

// NewRecorder returns an EventRecorder that records events with the given event source.
func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder {
func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger {
hostname, _ := os.Hostname()
reportingInstance := reportingController + "-" + hostname
return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}
return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
}

func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) {
func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) {
// Make a copy before modification, because there could be multiple listeners.
eventCopy := event.DeepCopy()
go func() {
@@ -181,60 +183,67 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C
return nil
}
isomorphicEvent.Series = &eventsv1.EventSeries{
Count: 1,
Count: 2,
LastObservedTime: metav1.MicroTime{Time: clock.Now()},
}
return isomorphicEvent
// Make a copy of the Event to make sure that recording it
// doesn't mess with the object stored in cache.
return isomorphicEvent.DeepCopy()
}
e.eventCache[eventKey] = eventCopy
return eventCopy
// Make a copy of the Event to make sure that recording it doesn't
// mess with the object stored in cache.
return eventCopy.DeepCopy()
}()
if evToRecord != nil {
recordedEvent := e.attemptRecording(evToRecord)
if recordedEvent != nil {
recordedEventKey := getKey(recordedEvent)
e.mu.Lock()
defer e.mu.Unlock()
e.eventCache[recordedEventKey] = recordedEvent
}
// TODO: Add a metric counting the number of recording attempts
e.attemptRecording(ctx, evToRecord)
// We don't want the new recorded Event to be reflected in the
// client's cache because server-side mutations could mess with the
// aggregation mechanism used by the client.
}
}()
}

func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event {
func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) {
tries := 0
for {
if recordedEvent, retry := recordEvent(e.sink, event); !retry {
return recordedEvent
if _, retry := recordEvent(ctx, e.sink, event); !retry {
return
}
tries++
if tries >= maxTriesPerEvent {
klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
return nil
klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
return
}
// Randomize sleep so that various clients won't all be
// synced up if the master goes down.
time.Sleep(wait.Jitter(e.sleepDuration, 0.25))
// synced up if the master goes down. Give up when
// the context is canceled.
select {
case <-ctx.Done():
return
case <-time.After(wait.Jitter(e.sleepDuration, 0.25)):
}
}
}

func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
var newEvent *eventsv1.Event
var err error
isEventSeries := event.Series != nil
if isEventSeries {
patch, patchBytesErr := createPatchBytesForSeries(event)
if patchBytesErr != nil {
klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr)
klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible")
return nil, false
}
newEvent, err = sink.Patch(event, patch)
newEvent, err = sink.Patch(ctx, event, patch)
}
// Update can fail because the event may have been removed and it no longer exists.
if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) {
// Making sure that ResourceVersion is empty on creation
event.ResourceVersion = ""
newEvent, err = sink.Create(event)
newEvent, err = sink.Create(ctx, event)
}
if err == nil {
return newEvent, false
@@ -244,13 +253,21 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool)
switch err.(type) {
case *restclient.RequestConstructionError:
// We will construct the request the same next time, so don't keep trying.
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
return nil, false
case *errors.StatusError:
if errors.IsAlreadyExists(err) {
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
// If we tried to create an Event from an EventSeries, it means that
// the original Patch request failed because the Event we were
// trying to patch didn't exist. If the creation failed because the
// Event now exists, it is safe to retry. This occurs when a new
// Event is emitted twice in a very short period of time.
if isEventSeries {
return nil, true
}
klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err)
} else {
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event)
}
return nil, false
case *errors.UnexpectedObjectError:
@@ -259,7 +276,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool)
default:
// This case includes actual http transport errors. Go ahead and retry.
}
klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)")
return nil, true
}

@@ -279,9 +296,11 @@ func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) {

func getKey(event *eventsv1.Event) eventKey {
key := eventKey{
eventType: event.Type,
action: event.Action,
reason: event.Reason,
reportingController: event.ReportingController,
reportingInstance: event.ReportingInstance,
regarding: event.Regarding,
}
if event.Related != nil {
@@ -293,29 +312,38 @@ func getKey(event *eventsv1.Event) eventKey {
// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
// The return value can be ignored or used to stop recording, if desired.
// TODO: this function should also return an error.
//
// Deprecated: use StartLogging instead.
func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() {
stopWatcher, err := e.StartEventWatcher(
func(obj runtime.Object) {
event, ok := obj.(*eventsv1.Event)
if !ok {
klog.Errorf("unexpected type, expected eventsv1.Event")
return
}
klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
})
logger := klog.Background().V(int(verbosity))
stopWatcher, err := e.StartLogging(logger)
if err != nil {
klog.Errorf("failed to start event watcher: '%v'", err)
logger.Error(err, "Failed to start event watcher")
return func() {}
}
return stopWatcher
}

// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
// The returned function can be ignored or used to stop recording, if desired.
func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) {
return e.StartEventWatcher(
func(obj runtime.Object) {
event, ok := obj.(*eventsv1.Event)
if !ok {
logger.Error(nil, "unexpected type, expected eventsv1.Event")
return
}
logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
})
}
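A sketch of the replacement path: hand StartLogging a logger whose verbosity is already applied. This assumes the broadcaster value exposes StartLogging, as the deprecation note above implies, and the client plumbing is an assumption:

broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
stop, err := broadcaster.StartLogging(klog.Background().V(3))
if err != nil {
	return err
}
defer stop() // stop forwarding events to the logger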

// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value is used to stop recording
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) {
watcher, err := e.Watch()
if err != nil {
klog.Errorf("Unable to start event watcher: '%v' (will not retry!)", err)
return nil, err
}
go func() {
@@ -331,37 +359,42 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime
return watcher.Stop, nil
}

func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error {
func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error {
eventHandler := func(obj runtime.Object) {
event, ok := obj.(*eventsv1.Event)
if !ok {
klog.Errorf("unexpected type, expected eventsv1.Event")
klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event")
return
}
e.recordToSink(event, clock.RealClock{})
e.recordToSink(ctx, event, clock.RealClock{})
}
stopWatcher, err := e.StartEventWatcher(eventHandler)
if err != nil {
return err
}
go func() {
<-stopCh
<-ctx.Done()
stopWatcher()
}()
return nil
}

// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
// Deprecated: use StartRecordingToSinkWithContext instead.
func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh)
go wait.Until(e.finishSeries, finishTime, stopCh)
err := e.startRecordingEvents(stopCh)
err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh))
if err != nil {
klog.Errorf("unexpected type, expected eventsv1.Event")
return
klog.Background().Error(err, "Failed to start recording to sink")
}
}

// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink.
func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error {
go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime)
go wait.UntilWithContext(ctx, e.finishSeries, finishTime)
return e.startRecordingEvents(ctx)
}

type eventBroadcasterAdapterImpl struct {
coreClient typedv1core.EventsGetter
coreBroadcaster record.EventBroadcaster
@@ -395,14 +428,14 @@ func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{
}
}

func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder {
func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger {
if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil {
return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name)
}
return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name))
}

func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder {
func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger {
return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name})
}

29
vendor/k8s.io/client-go/tools/events/event_recorder.go
generated
vendored
@@ -40,12 +40,33 @@ type recorderImpl struct {
clock clock.Clock
}

var _ EventRecorder = &recorderImpl{}

func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
timestamp := metav1.MicroTime{time.Now()}
recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...)
}

type recorderImplLogger struct {
*recorderImpl
logger klog.Logger
}

var _ EventRecorderLogger = &recorderImplLogger{}

func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...)
}

func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
}
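WithLogger lets a caller attach request-scoped context to every event the recorder emits; the recorder construction and value names below are assumptions:

recorder := broadcaster.NewRecorder(scheme.Scheme, "example-controller")
reqLogger := klog.Background().WithValues("requestID", reqID)
recorder.WithLogger(reqLogger).Eventf(pod, nil, corev1.EventTypeNormal, "Scheduled", "Binding", "assigned to node %s", nodeName)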
|
||||
|
||||
func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
|
||||
timestamp := metav1.MicroTime{Time: time.Now()}
|
||||
message := fmt.Sprintf(note, args...)
|
||||
refRegarding, err := reference.GetReference(recorder.scheme, regarding)
|
||||
if err != nil {
|
||||
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message)
|
||||
logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -53,11 +74,11 @@ func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.O
|
||||
if related != nil {
|
||||
refRelated, err = reference.GetReference(recorder.scheme, related)
|
||||
if err != nil {
|
||||
klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err)
|
||||
logger.V(9).Info("Could not construct reference", "object", related, "err", err)
|
||||
}
|
||||
}
|
||||
if !util.ValidateEventType(eventtype) {
|
||||
klog.Errorf("Unsupported event type: '%v'", eventtype)
|
||||
logger.Error(nil, "Unsupported event type", "eventType", eventtype)
|
||||
return
|
||||
}
|
||||
event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action)
|
||||
|
||||
7
vendor/k8s.io/client-go/tools/events/fake.go
generated
vendored
@@ -20,6 +20,7 @@ import (
"fmt"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
)

// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
@@ -29,6 +30,8 @@ type FakeRecorder struct {
Events chan string
}

var _ EventRecorderLogger = &FakeRecorder{}

// Eventf emits an event
func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
if f.Events != nil {
@@ -36,6 +39,10 @@ func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object,
}
}

func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger {
return f
}

// NewFakeRecorder creates new fake event recorder with event channel with
// buffer of given size.
func NewFakeRecorder(bufferSize int) *FakeRecorder {
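Since FakeRecorder now satisfies EventRecorderLogger (its WithLogger is a no-op that returns the receiver), it can stand in wherever the richer interface is required. A hedged test sketch; the test name, object, and assertion details are placeholders:

```go
package mycontroller_test

import (
	"strings"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/events"
)

func TestEmitsSyncedEvent(t *testing.T) {
	rec := events.NewFakeRecorder(10) // buffered channel holding up to 10 events
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// Code under test would normally receive rec as an EventRecorderLogger.
	rec.Eventf(pod, nil, corev1.EventTypeNormal, "Synced", "Sync", "synced pod %s", pod.Name)

	select {
	case got := <-rec.Events:
		if !strings.Contains(got, "Synced") {
			t.Fatalf("unexpected event: %q", got)
		}
	default:
		t.Fatal("expected an event to be recorded")
	}
}
```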
45
vendor/k8s.io/client-go/tools/events/interfaces.go
generated
vendored
@@ -17,39 +17,30 @@ limitations under the License.
package events

import (
"context"

eventsv1 "k8s.io/api/events/v1"
"k8s.io/apimachinery/pkg/runtime"
internalevents "k8s.io/client-go/tools/internal/events"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
)

// EventRecorder knows how to record events on behalf of an EventSource.
type EventRecorder interface {
// Eventf constructs an event from the given information and puts it in the queue for sending.
// 'regarding' is the object this event is about. Event will make a reference-- or you may also
// pass a reference to the object directly.
// 'related' is the secondary object for more complex actions. E.g. when regarding object triggers
// a creation or deletion of related object.
// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
// to automate handling of events, so imagine people writing switch statements to handle them.
// You want to make that easy.
// 'action' explains what happened with regarding/what action did the ReportingController
// (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.)
// take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter).
// 'note' is intended to be human readable.
Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{})
}
type EventRecorder = internalevents.EventRecorder
type EventRecorderLogger = internalevents.EventRecorderLogger

// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
type EventBroadcaster interface {
// StartRecordingToSink starts sending events received from the specified eventBroadcaster.
// Deprecated: use StartRecordingToSinkWithContext instead.
StartRecordingToSink(stopCh <-chan struct{})

// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster.
StartRecordingToSinkWithContext(ctx context.Context) error

// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
// with the event source set to the given event source.
NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder
NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger

// StartEventWatcher enables you to watch for emitted events without usage
// of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests).
@@ -59,8 +50,14 @@ type EventBroadcaster interface {

// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
// logging function. The return value can be ignored or used to stop recording, if desired.
// Deprecated: use StartLogging instead.
StartStructuredLogging(verbosity klog.Level) func()

// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
// The returned function can be ignored or used to stop recording, if desired.
StartLogging(logger klog.Logger) (func(), error)

// Shutdown shuts down the broadcaster
Shutdown()
}
@@ -70,9 +67,9 @@ type EventBroadcaster interface {
// It is assumed that EventSink will return the same sorts of errors as
// client-go's REST client.
type EventSink interface {
Create(event *eventsv1.Event) (*eventsv1.Event, error)
Update(event *eventsv1.Event) (*eventsv1.Event, error)
Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
}

// EventBroadcasterAdapter is an auxiliary interface to simplify migration to
@@ -85,10 +82,10 @@ type EventBroadcasterAdapter interface {
StartRecordingToSink(stopCh <-chan struct{})

// NewRecorder creates a new Event Recorder with specified name.
NewRecorder(name string) EventRecorder
NewRecorder(name string) EventRecorderLogger

// DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name.
DeprecatedNewLegacyRecorder(name string) record.EventRecorder
DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger

// Shutdown shuts down the broadcaster.
Shutdown()
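With the context threaded through EventSink, a custom sink can forward the caller's context straight to the API client. A sketch of a pass-through sink equivalent in spirit to the package's own EventSinkImpl; the loggingSink name is illustrative and not part of client-go:

```go
package mysink

import (
	"context"

	eventsv1 "k8s.io/api/events/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	eventsv1client "k8s.io/client-go/kubernetes/typed/events/v1"
	"k8s.io/klog/v2"
)

// loggingSink forwards events to the typed events/v1 client, logging through
// whatever logger the caller attached to ctx.
type loggingSink struct {
	delegate eventsv1client.EventsV1Interface
}

func (s *loggingSink) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
	klog.FromContext(ctx).V(4).Info("creating event", "reason", event.Reason)
	return s.delegate.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{})
}

func (s *loggingSink) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
	return s.delegate.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{})
}

func (s *loggingSink) Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
	return s.delegate.Events(oldEvent.Namespace).Patch(ctx, oldEvent.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
}
```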
59
vendor/k8s.io/client-go/tools/internal/events/interfaces.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package internal is needed to break an import cycle: record.EventRecorderAdapter
// needs this interface definition to implement it, but event.NewEventBroadcasterAdapter
// needs record.NewBroadcaster. Therefore this interface cannot be in event/interfaces.go.
package internal

import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
)

// EventRecorder knows how to record events on behalf of an EventSource.
type EventRecorder interface {
// Eventf constructs an event from the given information and puts it in the queue for sending.
// 'regarding' is the object this event is about. Event will make a reference-- or you may also
// pass a reference to the object directly.
// 'related' is the secondary object for more complex actions. E.g. when regarding object triggers
// a creation or deletion of related object.
// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
// to automate handling of events, so imagine people writing switch statements to handle them.
// You want to make that easy.
// 'action' explains what happened with regarding/what action did the ReportingController
// (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.)
// take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter).
// 'note' is intended to be human readable.
Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{})
}

// EventRecorderLogger extends EventRecorder such that a logger can
// be set for methods in EventRecorder. Normally, those methods
// use the global default logger to record errors and debug messages.
// If that is not desired, use WithLogger to provide a logger instance.
type EventRecorderLogger interface {
EventRecorder

// WithLogger replaces the context used for logging. This is a cheap call
// and meant to be used for contextual logging:
//    recorder := ...
//    logger := klog.FromContext(ctx)
//    recorder.WithLogger(logger).Eventf(...)
WithLogger(logger klog.Logger) EventRecorderLogger
}
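The WithLogger comment above describes the intended contextual-logging pattern. Expanded into a hedged sketch (reconcile and its arguments are assumed names, not part of this diff):

```go
package mycontroller

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

// reconcile binds the request-scoped logger to the recorder, so any errors
// the recorder itself logs (bad references, unsupported event types) carry
// the caller's logging context instead of going to the global default.
func reconcile(ctx context.Context, recorder events.EventRecorderLogger, obj runtime.Object) {
	logger := klog.FromContext(ctx)
	recorder.WithLogger(logger).Eventf(obj, nil,
		corev1.EventTypeNormal, "Reconciled", "Reconcile", "object reconciled")
}
```

Because WithLogger is documented as a cheap call, binding it per request rather than caching the bound recorder is the intended usage.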
20
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
generated
vendored
@@ -64,9 +64,8 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/utils/clock"

"k8s.io/klog/v2"
"k8s.io/utils/clock"
)

const (
@@ -100,6 +99,11 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
id := lec.Lock.Identity()
if id == "" {
return nil, fmt.Errorf("Lock identity is empty")
}

le := LeaderElector{
config: lec,
clock: clock.RealClock{},
@@ -199,9 +203,7 @@ type LeaderElector struct {
// stopped holding the leader lease
func (le *LeaderElector) Run(ctx context.Context) {
defer runtime.HandleCrash()
defer func() {
le.config.Callbacks.OnStoppedLeading()
}()
defer le.config.Callbacks.OnStoppedLeading()

if !le.acquire(ctx) {
return // ctx signalled done
@@ -263,6 +265,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {

// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
defer le.config.Lock.RecordEvent("stopped leading")
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wait.Until(func() {
@@ -278,7 +281,6 @@ func (le *LeaderElector) renew(ctx context.Context) {
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
le.metrics.leaderOff(le.config.Name)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
@@ -295,7 +297,7 @@ func (le *LeaderElector) release() bool {
if !le.IsLeader() {
return true
}
now := metav1.Now()
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
LeaderTransitions: le.observedRecord.LeaderTransitions,
LeaseDurationSeconds: 1,
@@ -315,7 +317,7 @@ func (le *LeaderElector) release() bool {
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
now := metav1.Now()
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
@@ -347,7 +349,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
le.observedRawRecord = oldLeaderElectionRawRecord
}
if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
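For context on the identity check added to NewLeaderElector above: the identity comes from the lock's ResourceLockConfig, so an empty value now fails fast instead of surfacing later. A hedged end-to-end sketch using the Lease lock (names like id and clientset are assumptions):

```go
package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runWithLeaderElection(ctx context.Context, clientset kubernetes.Interface, id string) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "my-controller", Namespace: "kube-system"},
		Client:     clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id}, // empty Identity is rejected
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* start controllers */ },
			OnStoppedLeading: func() { /* step down promptly */ },
		},
	})
}
```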
126
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
generated
vendored
@@ -1,126 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"context"
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// TODO: This is almost an exact replica of Endpoints lock.
// going forwards as we self host more and more components
// and use ConfigMaps as the means to pass that configuration
// data we will likely move to deprecate the Endpoints lock.

type configMapLock struct {
// ConfigMapMeta should contain a Name and a Namespace of a
// ConfigMapMeta object that the LeaderElector will attempt to lead.
ConfigMapMeta metav1.ObjectMeta
Client corev1client.ConfigMapsGetter
LockConfig ResourceLockConfig
cm *v1.ConfigMap
}

// Get returns the election record from a ConfigMap Annotation
func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
var record LeaderElectionRecord
cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
cml.cm = cm
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(ctx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cml.ConfigMapMeta.Name,
Namespace: cml.ConfigMapMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
}, metav1.CreateOptions{})
return err
}

// Update will update an existing annotation on a given resource.
func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
if cml.cm == nil {
return errors.New("configmap not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
if err != nil {
return err
}
cml.cm = cm
return nil
}

// RecordEvent in leader election while adding meta-data
func (cml *configMapLock) RecordEvent(s string) {
if cml.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
subject := &v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}
// Populate the type meta, so we don't have to get it from the schema
subject.Kind = "ConfigMap"
subject.APIVersion = v1.SchemeGroupVersion.String()
cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (cml *configMapLock) Describe() string {
return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}

// Identity returns the Identity of the lock
func (cml *configMapLock) Identity() string {
return cml.LockConfig.Identity
}
121
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
generated
vendored
@@ -1,121 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"context"
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

type endpointsLock struct {
// EndpointsMeta should contain a Name and a Namespace of an
// Endpoints object that the LeaderElector will attempt to lead.
EndpointsMeta metav1.ObjectMeta
Client corev1client.EndpointsGetter
LockConfig ResourceLockConfig
e *v1.Endpoints
}

// Get returns the election record from an Endpoints Annotation
func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
var record LeaderElectionRecord
ep, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
el.e = ep
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
recordBytes := []byte(recordStr)
if found {
if err := json.Unmarshal(recordBytes, &record); err != nil {
return nil, nil, err
}
}
return &record, recordBytes, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(ctx, &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: el.EndpointsMeta.Name,
Namespace: el.EndpointsMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
}, metav1.CreateOptions{})
return err
}

// Update will update an existing annotation on a given resource.
func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
if el.e == nil {
return errors.New("endpoint not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
if err != nil {
return err
}
el.e = e
return nil
}

// RecordEvent in leader election while adding meta-data
func (el *endpointsLock) RecordEvent(s string) {
if el.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
subject := &v1.Endpoints{ObjectMeta: el.e.ObjectMeta}
// Populate the type meta, so we don't have to get it from the schema
subject.Kind = "Endpoints"
subject.APIVersion = v1.SchemeGroupVersion.String()
el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (el *endpointsLock) Describe() string {
return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}

// Identity returns the Identity of the lock
func (el *endpointsLock) Identity() string {
return el.LockConfig.Identity
}
42
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
generated
vendored
@@ -34,7 +34,7 @@ const (
endpointsResourceLock = "endpoints"
configMapsResourceLock = "configmaps"
LeasesResourceLock = "leases"
// When using EndpointsLeasesResourceLock, you need to ensure that
// When using endpointsLeasesResourceLock, you need to ensure that
// API Priority & Fairness is configured with non-default flow-schema
// that will catch the necessary operations on leader-election related
// endpoint objects.
@@ -67,8 +67,8 @@ const (
// serviceAccount:
// name: '*'
// namespace: kube-system
EndpointsLeasesResourceLock = "endpointsleases"
// When using EndpointsLeasesResourceLock, you need to ensure that
endpointsLeasesResourceLock = "endpointsleases"
// When using configMapsLeasesResourceLock, you need to ensure that
// API Priority & Fairness is configured with non-default flow-schema
// that will catch the necessary operations on leader-election related
// configmap objects.
@@ -101,7 +101,7 @@ const (
// serviceAccount:
// name: '*'
// namespace: kube-system
ConfigMapsLeasesResourceLock = "configmapsleases"
configMapsLeasesResourceLock = "configmapsleases"
)

// LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -164,22 +164,6 @@ type Interface interface {

// Manufacture will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
endpointsLock := &endpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}
configmapLock := &configMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}
leaseLock := &LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
@@ -190,21 +174,15 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
}
switch lockType {
case endpointsResourceLock:
return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock)
return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock)
case configMapsResourceLock:
return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock)
return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock)
case LeasesResourceLock:
return leaseLock, nil
case EndpointsLeasesResourceLock:
return &MultiLock{
Primary: endpointsLock,
Secondary: leaseLock,
}, nil
case ConfigMapsLeasesResourceLock:
return &MultiLock{
Primary: configmapLock,
Secondary: leaseLock,
}, nil
case endpointsLeasesResourceLock:
return nil, fmt.Errorf("endpointsleases lock is removed, migrate to %s", LeasesResourceLock)
case configMapsLeasesResourceLock:
return nil, fmt.Errorf("configmapsleases lock is removed, migrated to %s", LeasesResourceLock)
default:
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
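After this change only the plain Lease lock survives; the migration multilocks now return errors from New just like the old endpoints and configmaps locks. A sketch of the one still-valid call (the identity value is an assumption):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLeaseLock(clientset kubernetes.Interface, id string) (resourcelock.Interface, error) {
	// Any of the removed lock types ("endpoints", "configmaps",
	// "endpointsleases", "configmapsleases") would return an error here.
	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system", "my-controller",
		clientset.CoreV1(), clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
	if err != nil {
		return nil, fmt.Errorf("creating lease lock: %w", err)
	}
	return lock, nil
}
```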
8
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
generated
vendored
@@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
r.LeaderTransitions = int(*spec.LeaseTransitions)
}
if spec.AcquireTime != nil {
r.AcquireTime = metav1.Time{spec.AcquireTime.Time}
r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time}
}
if spec.RenewTime != nil {
r.RenewTime = metav1.Time{spec.RenewTime.Time}
r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
}
return &r

@@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L
return coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{ler.RenewTime.Time},
AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
}
65
vendor/k8s.io/client-go/tools/metrics/metrics.go
generated
vendored
@@ -42,6 +42,10 @@ type LatencyMetric interface {
Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
}

type ResolverLatencyMetric interface {
Observe(ctx context.Context, host string, latency time.Duration)
}

// SizeMetric observes client response size partitioned by verb and host.
type SizeMetric interface {
Observe(ctx context.Context, verb string, host string, size float64)
@@ -58,6 +62,23 @@ type CallsMetric interface {
Increment(exitCode int, callStatus string)
}

// RetryMetric counts the number of retries sent to the server
// partitioned by code, method, and host.
type RetryMetric interface {
IncrementRetry(ctx context.Context, code string, method string, host string)
}

// TransportCacheMetric shows the number of entries in the internal transport cache
type TransportCacheMetric interface {
Observe(value int)
}

// TransportCreateCallsMetric counts the number of times a transport is created
// partitioned by the result of the cache: hit, miss, uncacheable
type TransportCreateCallsMetric interface {
Increment(result string)
}

var (
// ClientCertExpiry is the expiry time of a client certificate
ClientCertExpiry ExpiryMetric = noopExpiry{}
@@ -65,6 +86,8 @@ var (
ClientCertRotationAge DurationMetric = noopDuration{}
// RequestLatency is the latency metric that rest clients will update.
RequestLatency LatencyMetric = noopLatency{}
// ResolverLatency is the latency metric that DNS resolver will update
ResolverLatency ResolverLatencyMetric = noopResolverLatency{}
// RequestSize is the request size metric that rest clients will update.
RequestSize SizeMetric = noopSize{}
// ResponseSize is the response size metric that rest clients will update.
@@ -76,6 +99,15 @@ var (
// ExecPluginCalls is the number of calls made to an exec plugin, partitioned by
// exit code and call status.
ExecPluginCalls CallsMetric = noopCalls{}
// RequestRetry is the retry metric that tracks the number of
// retries sent to the server.
RequestRetry RetryMetric = noopRetry{}
// TransportCacheEntries is the metric that tracks the number of entries in the
// internal transport cache.
TransportCacheEntries TransportCacheMetric = noopTransportCache{}
// TransportCreateCalls is the metric that counts the number of times a new transport
// is created
TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{}
)

// RegisterOpts contains all the metrics to register. Metrics may be nil.
@@ -83,11 +115,15 @@ type RegisterOpts struct {
ClientCertExpiry ExpiryMetric
ClientCertRotationAge DurationMetric
RequestLatency LatencyMetric
ResolverLatency ResolverLatencyMetric
RequestSize SizeMetric
ResponseSize SizeMetric
RateLimiterLatency LatencyMetric
RequestResult ResultMetric
ExecPluginCalls CallsMetric
RequestRetry RetryMetric
TransportCacheEntries TransportCacheMetric
TransportCreateCalls TransportCreateCallsMetric
}

// Register registers metrics for the rest client to use. This can
@@ -103,6 +139,9 @@ func Register(opts RegisterOpts) {
if opts.RequestLatency != nil {
RequestLatency = opts.RequestLatency
}
if opts.ResolverLatency != nil {
ResolverLatency = opts.ResolverLatency
}
if opts.RequestSize != nil {
RequestSize = opts.RequestSize
}
@@ -118,6 +157,15 @@ func Register(opts RegisterOpts) {
if opts.ExecPluginCalls != nil {
ExecPluginCalls = opts.ExecPluginCalls
}
if opts.RequestRetry != nil {
RequestRetry = opts.RequestRetry
}
if opts.TransportCacheEntries != nil {
TransportCacheEntries = opts.TransportCacheEntries
}
if opts.TransportCreateCalls != nil {
TransportCreateCalls = opts.TransportCreateCalls
}
})
}

@@ -133,6 +181,11 @@ type noopLatency struct{}

func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}

type noopResolverLatency struct{}

func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) {
}

type noopSize struct{}

func (noopSize) Observe(context.Context, string, string, float64) {}
@@ -144,3 +197,15 @@ func (noopResult) Increment(context.Context, string, string, string) {}
type noopCalls struct{}

func (noopCalls) Increment(int, string) {}

type noopRetry struct{}

func (noopRetry) IncrementRetry(context.Context, string, string, string) {}

type noopTransportCache struct{}

func (noopTransportCache) Observe(int) {}

type noopTransportCreateCalls struct{}

func (noopTransportCreateCalls) Increment(string) {}
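The new RetryMetric, TransportCacheMetric, and TransportCreateCallsMetric hooks default to no-ops; consumers opt in through RegisterOpts. A sketch of wiring a custom retry counter (retryCounter is a made-up type, and Register only takes effect on its first call):

```go
package clientmetrics

import (
	"context"
	"sync"

	"k8s.io/client-go/tools/metrics"
)

// retryCounter records client retries partitioned by method, host, and code.
type retryCounter struct {
	mu     sync.Mutex
	counts map[string]int // key: "METHOD host code"
}

func (r *retryCounter) IncrementRetry(_ context.Context, code, method, host string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[method+" "+host+" "+code]++
}

func init() {
	// Fields left nil in RegisterOpts keep their no-op defaults.
	metrics.Register(metrics.RegisterOpts{
		RequestRetry: &retryCounter{counts: map[string]int{}},
	})
}
```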
36
vendor/k8s.io/client-go/tools/pager/pager.go
generated
vendored
@@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager {
// List returns a single list object, but attempts to retrieve smaller chunks from the
// server to reduce the impact on the server. If the chunk attempt fails, it will load
// the full list instead. The Limit field on options, if unset, will default to the page size.
//
// If items in the returned list are retained for different durations, and you want to avoid
// retaining the whole slice returned by p.PageFn as long as any item is referenced,
// use ListWithAlloc instead.
func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
return p.list(ctx, options, false)
}

// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn.
// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
//
// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency.
func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
return p.list(ctx, options, true)
}

func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) {
if options.Limit == 0 {
options.Limit = p.PageSize
}
@@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
list.ResourceVersion = m.GetResourceVersion()
list.SelfLink = m.GetSelfLink()
}
if err := meta.EachListItem(obj, func(obj runtime.Object) error {
eachListItemFunc := meta.EachListItem
if allocNew {
eachListItemFunc = meta.EachListItemWithAlloc
}
if err := eachListItemFunc(obj, func(obj runtime.Object) error {
list.Items = append(list.Items, obj)
return nil
}); err != nil {
@@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
//
// Items are retrieved in chunks from the server to reduce the impact on the server with up to
// ListPager.PageBufferSize chunks buffered concurrently in the background.
//
// If items passed to fn are retained for different durations, and you want to avoid
// retaining the whole slice returned by p.PageFn as long as any item is referenced,
// use EachListItemWithAlloc instead.
func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
return meta.EachListItem(obj, fn)
})
}

// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn.
// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
//
// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
return meta.EachListItemWithAlloc(obj, fn)
})
}

// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on
// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
// not return an error, any error encountered while retrieving the list from the server is
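The List/ListWithAlloc split above trades one shallow copy per item for the freedom to drop whole page slices early. A usage sketch over pods (the PageSize value is arbitrary):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func visitPods(ctx context.Context, clientset kubernetes.Interface) error {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return clientset.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	}))
	p.PageSize = 500

	// The WithAlloc variant shallow-copies each item, so retaining one pod
	// does not pin the whole 500-item page slice in memory.
	return p.EachListItemWithAlloc(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*corev1.Pod)
		_ = pod // process the pod
		return nil
	})
}
```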
214
vendor/k8s.io/client-go/tools/record/event.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
package record

import (
"context"
"fmt"
"math/rand"
"time"
@@ -28,6 +29,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
internalevents "k8s.io/client-go/tools/internal/events"
"k8s.io/client-go/tools/record/util"
ref "k8s.io/client-go/tools/reference"
"k8s.io/klog/v2"
@@ -109,6 +111,21 @@ type EventRecorder interface {
AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
}

// EventRecorderLogger extends EventRecorder such that a logger can
// be set for methods in EventRecorder. Normally, those methods
// use the global default logger to record errors and debug messages.
// If that is not desired, use WithLogger to provide a logger instance.
type EventRecorderLogger interface {
EventRecorder

// WithLogger replaces the context used for logging. This is a cheap call
// and meant to be used for contextual logging:
//    recorder := ...
//    logger := klog.FromContext(ctx)
//    recorder.WithLogger(logger).Eventf(...)
WithLogger(logger klog.Logger) EventRecorderLogger
}

// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
type EventBroadcaster interface {
// StartEventWatcher starts sending events received from this EventBroadcaster to the given
@@ -130,21 +147,25 @@ type EventBroadcaster interface {

// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
// with the event source set to the given event source.
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger

// Shutdown shuts down the broadcaster
// Shutdown shuts down the broadcaster. Once the broadcaster is shut
// down, it will only try to record an event in a sink once before
// giving up on it with an error message.
Shutdown()
}

// EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder
// implementing the new "k8s.io/client-go/tools/events".EventRecorder interface.
type EventRecorderAdapter struct {
recorder EventRecorder
recorder EventRecorderLogger
}

var _ internalevents.EventRecorder = &EventRecorderAdapter{}

// NewEventRecorderAdapter returns an adapter implementing the new
// "k8s.io/client-go/tools/events".EventRecorder interface.
func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter {
func NewEventRecorderAdapter(recorder EventRecorderLogger) *EventRecorderAdapter {
return &EventRecorderAdapter{
recorder: recorder,
}
@@ -155,33 +176,84 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re
a.recorder.Eventf(regarding, eventtype, reason, note, args...)
}

func (a *EventRecorderAdapter) WithLogger(logger klog.Logger) internalevents.EventRecorderLogger {
return &EventRecorderAdapter{
recorder: a.recorder.WithLogger(logger),
}
}

// Creates a new event broadcaster.
func NewBroadcaster() EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster {
c := config{
sleepDuration: defaultSleepDuration,
}
for _, opt := range opts {
opt(&c)
}
eventBroadcaster := &eventBroadcasterImpl{
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: c.sleepDuration,
options: c.CorrelatorOptions,
}
ctx := c.Context
if ctx == nil {
ctx = context.Background()
} else {
// Calling Shutdown is not required when a context was provided:
// when the context is canceled, this goroutine will shut down
// the broadcaster.
go func() {
<-ctx.Done()
eventBroadcaster.Broadcaster.Shutdown()
}()
}
eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx)
return eventBroadcaster
}

func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: sleepDuration,
}
return NewBroadcaster(WithSleepDuration(sleepDuration))
}

func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: defaultSleepDuration,
options: options,
return NewBroadcaster(WithCorrelatorOptions(options))
}

func WithCorrelatorOptions(options CorrelatorOptions) BroadcasterOption {
return func(c *config) {
c.CorrelatorOptions = options
}
}

// WithContext sets a context for the broadcaster. Canceling the context will
// shut down the broadcaster, Shutdown doesn't need to be called. The context
// can also be used to provide a logger.
func WithContext(ctx context.Context) BroadcasterOption {
return func(c *config) {
c.Context = ctx
}
}

func WithSleepDuration(sleepDuration time.Duration) BroadcasterOption {
return func(c *config) {
c.sleepDuration = sleepDuration
}
}

type BroadcasterOption func(*config)

type config struct {
CorrelatorOptions
context.Context
sleepDuration time.Duration
}
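NewBroadcaster is now variadic over functional options, with the old constructors reduced to thin wrappers. A sketch combining the context and correlator options (the clientset variable is assumed and BurstSize is chosen arbitrarily):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func newRecorder(ctx context.Context, clientset kubernetes.Interface) record.EventRecorderLogger {
	// With WithContext, canceling ctx shuts the broadcaster down;
	// calling Shutdown explicitly is no longer required.
	broadcaster := record.NewBroadcaster(
		record.WithContext(ctx),
		record.WithCorrelatorOptions(record.CorrelatorOptions{BurstSize: 50}),
	)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: clientset.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "my-controller"})
}
```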
type eventBroadcasterImpl struct {
|
||||
*watch.Broadcaster
|
||||
sleepDuration time.Duration
|
||||
options CorrelatorOptions
|
||||
sleepDuration time.Duration
|
||||
options CorrelatorOptions
|
||||
cancelationCtx context.Context
|
||||
cancel func()
|
||||
}
|
||||
|
||||
// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
|
||||
@@ -191,15 +263,16 @@ func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interf
|
||||
eventCorrelator := NewEventCorrelatorWithOptions(e.options)
|
||||
return e.StartEventWatcher(
|
||||
func(event *v1.Event) {
|
||||
recordToSink(sink, event, eventCorrelator, e.sleepDuration)
|
||||
e.recordToSink(sink, event, eventCorrelator)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *eventBroadcasterImpl) Shutdown() {
|
||||
e.Broadcaster.Shutdown()
|
||||
e.cancel()
|
||||
}
|
||||
|
||||
func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) {
|
||||
func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator) {
|
||||
// Make a copy before modification, because there could be multiple listeners.
|
||||
// Events are safe to copy like this.
|
||||
eventCopy := *event
|
||||
@@ -213,20 +286,26 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela
|
||||
}
|
||||
tries := 0
|
||||
for {
|
||||
if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
|
||||
if recordEvent(e.cancelationCtx, sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
|
||||
break
|
||||
}
|
||||
tries++
|
||||
if tries >= maxTriesPerEvent {
|
||||
klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
|
||||
klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
|
||||
break
|
||||
}
|
||||
|
||||
// Randomize the first sleep so that various clients won't all be
|
||||
// synced up if the master goes down.
|
||||
delay := e.sleepDuration
|
||||
if tries == 1 {
|
||||
time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64()))
|
||||
} else {
|
||||
time.Sleep(sleepDuration)
|
||||
delay = time.Duration(float64(delay) * rand.Float64())
|
||||
}
|
||||
select {
|
||||
case <-e.cancelationCtx.Done():
|
||||
klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (broadcaster is shut down)", "event", event)
|
||||
return
|
||||
case <-time.After(delay):
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -235,7 +314,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela
|
||||
// was successfully recorded or discarded, false if it should be retried.
|
||||
// If updateExistingEvent is false, it creates a new event, otherwise it updates
|
||||
// existing event.
|
||||
func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
|
||||
func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
|
||||
var newEvent *v1.Event
|
||||
var err error
|
||||
if updateExistingEvent {
|
||||
@@ -258,13 +337,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
|
||||
switch err.(type) {
|
||||
case *restclient.RequestConstructionError:
|
||||
// We will construct the request the same next time, so don't keep trying.
|
||||
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
|
||||
klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
|
||||
return true
|
||||
case *errors.StatusError:
|
||||
if errors.IsAlreadyExists(err) {
|
||||
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
|
||||
if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
|
||||
klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err)
|
||||
} else {
|
||||
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
|
||||
klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event)
|
||||
}
|
||||
return true
|
||||
case *errors.UnexpectedObjectError:
|
||||
@@ -273,7 +352,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
|
||||
default:
|
||||
// This case includes actual http transport errors. Go ahead and retry.
|
||||
}
|
||||
klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err)
|
||||
klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)", "event", event)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -286,12 +365,15 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int
|
||||
})
|
||||
}
|
||||
|
||||
// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
|
||||
// StartStructuredLogging starts sending events received from this EventBroadcaster to a structured logger.
|
||||
// The logger is retrieved from a context if the broadcaster was constructed with a context, otherwise
|
||||
// the global default is used.
|
||||
// The return value can be ignored or used to stop recording, if desired.
|
||||
func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface {
|
||||
loggerV := klog.FromContext(e.cancelationCtx).V(int(verbosity))
|
||||
return e.StartEventWatcher(
|
||||
func(e *v1.Event) {
|
||||
klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
|
||||
loggerV.Info("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -300,26 +382,32 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
|
||||
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
|
||||
watcher, err := e.Watch()
|
||||
if err != nil {
|
||||
klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
|
||||
klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)")
|
||||
}
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
for watchEvent := range watcher.ResultChan() {
|
||||
event, ok := watchEvent.Object.(*v1.Event)
|
||||
if !ok {
|
||||
// This is all local, so there's no reason this should
|
||||
// ever happen.
|
||||
continue
|
||||
for {
|
||||
select {
|
||||
case <-e.cancelationCtx.Done():
|
||||
watcher.Stop()
|
||||
return
|
||||
case watchEvent := <-watcher.ResultChan():
|
||||
event, ok := watchEvent.Object.(*v1.Event)
|
||||
if !ok {
|
||||
// This is all local, so there's no reason this should
|
||||
// ever happen.
|
||||
continue
|
||||
}
|
||||
eventHandler(event)
|
||||
}
|
||||
eventHandler(event)
|
||||
}
|
||||
}()
|
||||
return watcher
|
||||
}
|
||||
|
||||
// NewRecorder returns an EventRecorder that records events with the given event source.
|
||||
func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder {
|
||||
return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}
func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger {
return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
}

type recorderImpl struct {
@@ -329,21 +417,26 @@ type recorderImpl struct {
clock clock.PassiveClock
}

func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) {
var _ EventRecorder = &recorderImpl{}

func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.Object, annotations map[string]string, eventtype, reason, message string) {
ref, err := ref.GetReference(recorder.scheme, object)
if err != nil {
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
logger.Error(err, "Could not construct reference, will not report event", "object", object, "eventType", eventtype, "reason", reason, "message", message)
return
}

if !util.ValidateEventType(eventtype) {
klog.Errorf("Unsupported event type: '%v'", eventtype)
logger.Error(nil, "Unsupported event type", "eventType", eventtype)
return
}

event := recorder.makeEvent(ref, annotations, eventtype, reason, message)
event.Source = recorder.source

event.ReportingInstance = recorder.source.Host
event.ReportingController = recorder.source.Component

// NOTE: events should be a non-blocking operation, but we also need to not
// put this in a goroutine, otherwise we'll race to write to a closed channel
// when we go to shut down this broadcaster. Just drop events if we get overloaded,
@@ -351,16 +444,16 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
// outgoing events anyway).
sent, err := recorder.ActionOrDrop(watch.Added, event)
if err != nil {
klog.Errorf("unable to record event: %v (will not retry!)", err)
logger.Error(err, "Unable to record event (will not retry!)")
return
}
if !sent {
klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
logger.Error(nil, "Unable to record event: too many queued events, dropped event", "event", event)
}
}

func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
recorder.generateEvent(object, nil, eventtype, reason, message)
recorder.generateEvent(klog.Background(), object, nil, eventtype, reason, message)
}

func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
@@ -368,7 +461,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m
}

func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
recorder.generateEvent(klog.Background(), object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}

func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event {
@@ -392,3 +485,26 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map
Type: eventtype,
}
}

type recorderImplLogger struct {
*recorderImpl
logger klog.Logger
}

var _ EventRecorderLogger = &recorderImplLogger{}

func (recorder recorderImplLogger) Event(object runtime.Object, eventtype, reason, message string) {
recorder.recorderImpl.generateEvent(recorder.logger, object, nil, eventtype, reason, message)
}

func (recorder recorderImplLogger) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}

func (recorder recorderImplLogger) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(recorder.logger, object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}

func (recorder recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
return recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
}
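As a usage sketch outside this diff (the component name, pod fixture, and logger values are illustrative assumptions), the logger-aware recorder returned by NewRecorder above might be driven like this:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

func emitExample(pod *v1.Pod) {
	broadcaster := record.NewBroadcaster()
	defer broadcaster.Shutdown()

	// NewRecorder now returns an EventRecorderLogger, so a contextual
	// logger can be attached per call site via WithLogger.
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "demo-controller"})
	recorder.WithLogger(klog.Background().WithValues("pod", klog.KObj(pod))).
		Eventf(pod, v1.EventTypeNormal, "Synced", "pod %s synced", pod.Name)
}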
32
vendor/k8s.io/client-go/tools/record/fake.go
generated
vendored
@@ -20,6 +20,7 @@ import (
"fmt"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
)

// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
@@ -31,6 +32,8 @@ type FakeRecorder struct {
IncludeObject bool
}

var _ EventRecorderLogger = &FakeRecorder{}

func objectString(object runtime.Object, includeObject bool) string {
if !includeObject {
return ""
@@ -41,20 +44,35 @@ func objectString(object runtime.Object, includeObject bool) string {
)
}

func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
if f.Events != nil {
f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject))
func annotationsString(annotations map[string]string) string {
if len(annotations) == 0 {
return ""
} else {
return " " + fmt.Sprint(annotations)
}
}

func (f *FakeRecorder) writeEvent(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
if f.Events != nil {
f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) +
objectString(object, f.IncludeObject) + annotationsString(annotations)
}
}

func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
f.writeEvent(object, nil, eventtype, reason, "%s", message)
}

func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if f.Events != nil {
f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject)
}
f.writeEvent(object, nil, eventtype, reason, messageFmt, args...)
}

func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
f.Eventf(object, eventtype, reason, messageFmt, args...)
f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...)
}

func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger {
return f
}

// NewFakeRecorder creates new fake event recorder with event channel with
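A sketch of how a unit test might exercise the reworked FakeRecorder (the pod fixture and reason are illustrative):

package record_test

import (
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func TestRecordsAnnotatedEvent(t *testing.T) {
	rec := record.NewFakeRecorder(10) // channel buffer of 10 events
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}

	rec.AnnotatedEventf(pod, map[string]string{"why": "test"},
		v1.EventTypeWarning, "FailedSync", "error syncing %q", pod.Name)

	got := <-rec.Events // "<type> <reason> <message>" plus optional object/annotations
	if !strings.Contains(got, "FailedSync") {
		t.Fatalf("unexpected event: %s", got)
	}
}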
57
vendor/k8s.io/client-go/tools/remotecommand/fallback.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
"context"
)

var _ Executor = &fallbackExecutor{}

type fallbackExecutor struct {
primary Executor
secondary Executor
shouldFallback func(error) bool
}

// NewFallbackExecutor creates an Executor that first attempts to use the
// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial
// websocket "StreamWithContext" call fails.
// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) {
return &fallbackExecutor{
primary: primary,
secondary: secondary,
shouldFallback: shouldFallback,
}, nil
}

// Stream is deprecated. Please use "StreamWithContext".
func (f *fallbackExecutor) Stream(options StreamOptions) error {
return f.StreamWithContext(context.Background(), options)
}

// StreamWithContext initially attempts to call "StreamWithContext" using the
// primary executor, falling back to calling the secondary executor if the
// initial primary call to upgrade to a websocket connection fails.
func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
err := f.primary.StreamWithContext(ctx, options)
if f.shouldFallback(err) {
return f.secondary.StreamWithContext(ctx, options)
}
return err
}
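As a wiring sketch outside this diff (assuming config and podExecURL already exist; httpstream.IsUpgradeFailure is one plausible shouldFallback predicate), the fallback executor combines the two transports like this:

package example

import (
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func newExecutor(config *restclient.Config, podExecURL *url.URL) (remotecommand.Executor, error) {
	websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", podExecURL.String())
	if err != nil {
		return nil, err
	}
	spdyExec, err := remotecommand.NewSPDYExecutor(config, "POST", podExecURL)
	if err != nil {
		return nil, err
	}
	// Fall back to SPDY only when the websocket upgrade handshake itself failed.
	return remotecommand.NewFallbackExecutor(websocketExec, spdyExec, httpstream.IsUpgradeFailure)
}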
124
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
generated
vendored
@@ -18,17 +18,10 @@ package remotecommand

import (
"context"
"fmt"
"io"
"net/http"
"net/url"

"k8s.io/klog/v2"

"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport/spdy"
)

// StreamOptions holds information pertaining to the current streaming session:
@@ -63,120 +56,3 @@ type streamCreator interface {
type streamProtocolHandler interface {
stream(conn streamCreator) error
}

// streamExecutor handles transporting standard shell streams over an httpstream connection.
type streamExecutor struct {
upgrader spdy.Upgrader
transport http.RoundTripper

method string
url *url.URL
protocols []string
}

// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
if err != nil {
return nil, err
}
return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}

// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
return NewSPDYExecutorForProtocols(
transport, upgrader, method, url,
remotecommand.StreamProtocolV4Name,
remotecommand.StreamProtocolV3Name,
remotecommand.StreamProtocolV2Name,
remotecommand.StreamProtocolV1Name,
)
}

// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
return &streamExecutor{
upgrader: upgrader,
transport: transport,
method: method,
url: url,
protocols: protocols,
}, nil
}

// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *streamExecutor) Stream(options StreamOptions) error {
return e.StreamWithContext(context.Background(), options)
}

// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
if err != nil {
return nil, nil, fmt.Errorf("error creating request: %v", err)
}

conn, protocol, err := spdy.Negotiate(
e.upgrader,
&http.Client{Transport: e.transport},
req,
e.protocols...,
)
if err != nil {
return nil, nil, err
}

var streamer streamProtocolHandler

switch protocol {
case remotecommand.StreamProtocolV4Name:
streamer = newStreamProtocolV4(options)
case remotecommand.StreamProtocolV3Name:
streamer = newStreamProtocolV3(options)
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
}

return conn, streamer, nil
}

// StreamWithContext opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects or the context is done.
func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
conn, streamer, err := e.newConnectionAndStream(ctx, options)
if err != nil {
return err
}
defer conn.Close()

panicChan := make(chan any, 1)
errorChan := make(chan error, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
errorChan <- streamer.stream(conn)
}()

select {
case p := <-panicChan:
panic(p)
case err := <-errorChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}
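The goroutine-plus-select shape above is a reusable idiom: stream errors flow through errorChan, a worker panic is re-raised on the caller's goroutine, and context cancellation wins if it fires first. A minimal standalone sketch of the same idiom (the function name is ours, not client-go API):

package main

import (
	"context"
	"fmt"
)

// runWithPanicPropagation re-raises a worker panic on the calling goroutine
// while still honoring context cancellation, mirroring the select used by
// StreamWithContext above.
func runWithPanicPropagation(ctx context.Context, work func() error) error {
	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		errorChan <- work()
	}()

	select {
	case p := <-panicChan:
		panic(p) // surface the worker's panic with a local stack
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	err := runWithPanicPropagation(context.Background(), func() error { return nil })
	fmt.Println(err)
}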
171
vendor/k8s.io/client-go/tools/remotecommand/spdy.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
"context"
"fmt"
"net/http"
"net/url"

"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport/spdy"
"k8s.io/klog/v2"
)

// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection.
type spdyStreamExecutor struct {
upgrader spdy.Upgrader
transport http.RoundTripper

method string
url *url.URL
protocols []string
rejectRedirects bool // if true, receiving redirect from upstream is an error
}

// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
if err != nil {
return nil, err
}
return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}

// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future
// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated)
// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect
// during the attempted upgrade in these "Stream" calls, an error is returned.
func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url)
if err != nil {
return nil, err
}
spdyExecutor := executor.(*spdyStreamExecutor)
spdyExecutor.rejectRedirects = true
return spdyExecutor, nil
}

// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
return NewSPDYExecutorForProtocols(
transport, upgrader, method, url,
remotecommand.StreamProtocolV5Name,
remotecommand.StreamProtocolV4Name,
remotecommand.StreamProtocolV3Name,
remotecommand.StreamProtocolV2Name,
remotecommand.StreamProtocolV1Name,
)
}

// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
return &spdyStreamExecutor{
upgrader: upgrader,
transport: transport,
method: method,
url: url,
protocols: protocols,
}, nil
}

// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *spdyStreamExecutor) Stream(options StreamOptions) error {
return e.StreamWithContext(context.Background(), options)
}

// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
if err != nil {
return nil, nil, fmt.Errorf("error creating request: %v", err)
}

client := http.Client{Transport: e.transport}
if e.rejectRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return fmt.Errorf("redirect not allowed")
}
}
conn, protocol, err := spdy.Negotiate(
e.upgrader,
&client,
req,
e.protocols...,
)
if err != nil {
return nil, nil, err
}

var streamer streamProtocolHandler

switch protocol {
case remotecommand.StreamProtocolV5Name:
streamer = newStreamProtocolV5(options)
case remotecommand.StreamProtocolV4Name:
streamer = newStreamProtocolV4(options)
case remotecommand.StreamProtocolV3Name:
streamer = newStreamProtocolV3(options)
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
}

return conn, streamer, nil
}

// StreamWithContext opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects or the context is done.
func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
conn, streamer, err := e.newConnectionAndStream(ctx, options)
if err != nil {
return err
}
defer conn.Close()

panicChan := make(chan any, 1)
errorChan := make(chan error, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
errorChan <- streamer.stream(conn)
}()

select {
case p := <-panicChan:
panic(p)
case err := <-errorChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}
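For reference, a sketch of driving this executor end to end (the rest config and exec URL are assumed to be built elsewhere, e.g. from a pod exec request):

package example

import (
	"context"
	"net/url"
	"os"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func execInPod(ctx context.Context, config *restclient.Config, execURL *url.URL) error {
	exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	// StreamOptions selects which streams the session requests.
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
		Tty:    false,
	})
}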
35
vendor/k8s.io/client-go/tools/remotecommand/v5.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

// streamProtocolV5 adds support for V5 of the remote command subprotocol.
// For the streamProtocolHandler, this version is the same as V4.
type streamProtocolV5 struct {
*streamProtocolV4
}

var _ streamProtocolHandler = &streamProtocolV5{}

func newStreamProtocolV5(options StreamOptions) streamProtocolHandler {
return &streamProtocolV5{
streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4),
}
}

func (p *streamProtocolV5) stream(conn streamCreator) error {
return p.streamProtocolV4.stream(conn)
}
502
vendor/k8s.io/client-go/tools/remotecommand/websocket.go
generated
vendored
Normal file
@@ -0,0 +1,502 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"sync"
"time"

gwebsocket "github.com/gorilla/websocket"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/transport/websocket"
"k8s.io/klog/v2"
)

// writeDeadline defines the time that a write to the websocket connection
// must complete by, otherwise an i/o timeout occurs. The writeDeadline
// has nothing to do with a response from the other websocket connection
// endpoint; only that the message was successfully processed by the
// local websocket connection. The typical write deadline within the websocket
// library is one second.
const writeDeadline = 2 * time.Second

var (
_ Executor = &wsStreamExecutor{}
_ streamCreator = &wsStreamCreator{}
_ httpstream.Stream = &stream{}

streamType2streamID = map[string]byte{
v1.StreamTypeStdin: remotecommand.StreamStdIn,
v1.StreamTypeStdout: remotecommand.StreamStdOut,
v1.StreamTypeStderr: remotecommand.StreamStdErr,
v1.StreamTypeError: remotecommand.StreamErr,
v1.StreamTypeResize: remotecommand.StreamResize,
}
)

const (
// pingPeriod defines how often a heartbeat "ping" message is sent.
pingPeriod = 5 * time.Second
// pingReadDeadline defines the time waiting for a response heartbeat
// "pong" message before a timeout error occurs for websocket reading.
// This duration must always be greater than the "pingPeriod". By defining
// this deadline in terms of the ping period, we are essentially saying
// we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout.
pingReadDeadline = (pingPeriod * 3) + (1 * time.Second)
)

// wsStreamExecutor handles transporting standard shell streams over an httpstream connection.
type wsStreamExecutor struct {
transport http.RoundTripper
upgrader websocket.ConnectionHolder
method string
url string
// requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io).
protocols []string
// selected protocol from the handshake process; could be empty string if handshake fails.
negotiated string
// period defines how often a "ping" heartbeat message is sent to the other endpoint.
heartbeatPeriod time.Duration
// deadline defines the amount of time before "pong" response must be received.
heartbeatDeadline time.Duration
}

func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) {
// Only supports V5 protocol for correct version skew functionality.
// Previous api servers will proxy upgrade requests to legacy websocket
// servers on container runtimes which support V1-V4. These legacy
// websocket servers will not handle the new CLOSE signal.
return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name)
}

// NewWebSocketExecutorForProtocols allows executing commands via a WebSocket connection.
func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) {
transport, upgrader, err := websocket.RoundTripperFor(config)
if err != nil {
return nil, fmt.Errorf("error creating websocket transports: %v", err)
}
return &wsStreamExecutor{
transport: transport,
upgrader: upgrader,
method: method,
url: url,
protocols: protocols,
heartbeatPeriod: pingPeriod,
heartbeatDeadline: pingReadDeadline,
}, nil
}
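As a usage sketch (config and execURL are assumed to exist; the compat protocol list is our illustration of the testing escape hatch), the two constructors differ only in the negotiated protocol set:

package example

import (
	apiremotecommand "k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// newExecutors contrasts the default V5-only constructor with an explicit
// protocol list, which the comment above reserves for version-skew scenarios.
func newExecutors(config *restclient.Config, execURL string) (remotecommand.Executor, remotecommand.Executor, error) {
	v5Only, err := remotecommand.NewWebSocketExecutor(config, "GET", execURL)
	if err != nil {
		return nil, nil, err
	}
	compat, err := remotecommand.NewWebSocketExecutorForProtocols(
		config, "GET", execURL,
		apiremotecommand.StreamProtocolV5Name,
		apiremotecommand.StreamProtocolV4Name,
	)
	if err != nil {
		return nil, nil, err
	}
	return v5Only, compat, nil
}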
// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
func (e *wsStreamExecutor) Stream(options StreamOptions) error {
return e.StreamWithContext(context.Background(), options)
}

// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various
// goroutines to implement the necessary streams over the connection. The "options" parameter
// defines which streams are requested. Returns an error if one occurred. This method is NOT
// safe to run concurrently with the same executor (because of the state stored in the upgrader).
func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil)
if err != nil {
return err
}
conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...)
if err != nil {
return err
}
if conn == nil {
panic(fmt.Errorf("websocket connection is nil"))
}
defer conn.Close()
e.negotiated = conn.Subprotocol()
klog.V(4).Infof("The subprotocol is %s", e.negotiated)

var streamer streamProtocolHandler
switch e.negotiated {
case remotecommand.StreamProtocolV5Name:
streamer = newStreamProtocolV5(options)
case remotecommand.StreamProtocolV4Name:
streamer = newStreamProtocolV4(options)
case remotecommand.StreamProtocolV3Name:
streamer = newStreamProtocolV3(options)
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
}

panicChan := make(chan any, 1)
errorChan := make(chan error, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
creator := newWSStreamCreator(conn)
go creator.readDemuxLoop(
e.upgrader.DataBufferSize(),
e.heartbeatPeriod,
e.heartbeatDeadline,
)
errorChan <- streamer.stream(creator)
}()

select {
case p := <-panicChan:
panic(p)
case err := <-errorChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}

type wsStreamCreator struct {
conn *gwebsocket.Conn
// Protects writing to websocket connection; reading is lock-free
connWriteLock sync.Mutex
// map of stream id to stream; multiple streams read/write the connection
streams map[byte]*stream
streamsMu sync.Mutex
}

func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator {
return &wsStreamCreator{
conn: conn,
streams: map[byte]*stream{},
}
}

func (c *wsStreamCreator) getStream(id byte) *stream {
c.streamsMu.Lock()
defer c.streamsMu.Unlock()
return c.streams[id]
}

func (c *wsStreamCreator) setStream(id byte, s *stream) {
c.streamsMu.Lock()
defer c.streamsMu.Unlock()
c.streams[id] = s
}

// CreateStream uses id from passed headers to create a stream over "c.conn" connection.
// Returns a Stream structure or nil and an error if one occurred.
func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) {
streamType := headers.Get(v1.StreamType)
id, ok := streamType2streamID[streamType]
if !ok {
return nil, fmt.Errorf("unknown stream type: %s", streamType)
}
if s := c.getStream(id); s != nil {
return nil, fmt.Errorf("duplicate stream for type %s", streamType)
}
reader, writer := io.Pipe()
s := &stream{
headers: headers,
readPipe: reader,
writePipe: writer,
conn: c.conn,
connWriteLock: &c.connWriteLock,
id: id,
}
c.setStream(id, s)
return s, nil
}

// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket
// connection. This loop reads the connection, and demultiplexes the data
// into one of the individual stream pipes (by checking the stream id). This
// loop can *not* be run concurrently, because there can only be one websocket
// connection reader at a time (a read mutex would provide no benefit).
func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) {
// Initialize and start the ping/pong heartbeat.
h := newHeartbeat(c.conn, period, deadline)
// Set initial timeout for websocket connection reading.
if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil {
klog.Errorf("Websocket initial setting read deadline failed %v", err)
return
}
go h.start()
// Buffer size must correspond to the same size allocated
// for the read buffer during websocket client creation. A
// difference can cause incomplete connection reads.
readBuffer := make([]byte, bufferSize)
for {
// NextReader() only returns data messages (BinaryMessage or Text
// Message). Even though this call will never return control frames
// such as ping, pong, or close, this call is necessary for these
// message types to be processed. There can only be one reader
// at a time, so this reader loop must *not* be run concurrently;
// there is no lock for reading. Calling "NextReader()" before the
// current reader has been processed will close the current reader.
// If the heartbeat read deadline times out, this "NextReader()" will
// return an i/o error, and error handling will clean up.
messageType, r, err := c.conn.NextReader()
if err != nil {
websocketErr, ok := err.(*gwebsocket.CloseError)
if ok && websocketErr.Code == gwebsocket.CloseNormalClosure {
err = nil // readers will get io.EOF as it's a normal closure
} else {
err = fmt.Errorf("next reader: %w", err)
}
c.closeAllStreamReaders(err)
return
}
// All remote command protocols send/receive only binary data messages.
if messageType != gwebsocket.BinaryMessage {
c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType))
return
}
// It's ok to read just a single byte because the underlying library wraps the actual
// connection with a buffered reader anyway.
_, err = io.ReadFull(r, readBuffer[:1])
if err != nil {
c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err))
return
}
streamID := readBuffer[0]
s := c.getStream(streamID)
if s == nil {
klog.Errorf("Unknown stream id %d, discarding message", streamID)
continue
}
for {
nr, errRead := r.Read(readBuffer)
if nr > 0 {
// Write the data to the stream's pipe. This can block.
_, errWrite := s.writePipe.Write(readBuffer[:nr])
if errWrite != nil {
// Pipe must have been closed by the stream user.
// Nothing to do, discard the message.
break
}
}
if errRead != nil {
if errRead == io.EOF {
break
}
c.closeAllStreamReaders(fmt.Errorf("read message: %w", errRead))
return
}
}
}
}
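On the wire, each binary websocket message the loop above reads is one stream-id byte followed by payload. Hypothetical helpers (not part of client-go) make the framing explicit:

package example

import "fmt"

// encodeFrame prefixes a payload with its stream id, the layout that
// readDemuxLoop above parses and stream.Write below produces.
func encodeFrame(streamID byte, payload []byte) []byte {
	frame := make([]byte, 0, 1+len(payload))
	frame = append(frame, streamID)
	return append(frame, payload...)
}

// decodeFrame splits a received frame back into stream id and payload.
func decodeFrame(frame []byte) (streamID byte, payload []byte, err error) {
	if len(frame) == 0 {
		return 0, nil, fmt.Errorf("frame carries no stream id")
	}
	return frame[0], frame[1:], nil
}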
// closeAllStreamReaders closes readers in all streams.
// This unblocks all stream.Read() calls.
func (c *wsStreamCreator) closeAllStreamReaders(err error) {
c.streamsMu.Lock()
defer c.streamsMu.Unlock()
for _, s := range c.streams {
// Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes.
_ = s.writePipe.CloseWithError(err)
}
}

type stream struct {
headers http.Header
readPipe *io.PipeReader
writePipe *io.PipeWriter
// conn is used for writing directly into the connection.
// Is nil after Close() / Reset() to prevent future writes.
conn *gwebsocket.Conn
// connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only.
// The mutex is shared across all streams because the underlying connection is shared.
connWriteLock *sync.Mutex
id byte
}

func (s *stream) Read(p []byte) (n int, err error) {
return s.readPipe.Read(p)
}

// Write writes directly to the underlying WebSocket connection.
func (s *stream) Write(p []byte) (n int, err error) {
klog.V(4).Infof("Write() on stream %d", s.id)
defer klog.V(4).Infof("Write() done on stream %d", s.id)
s.connWriteLock.Lock()
defer s.connWriteLock.Unlock()
if s.conn == nil {
return 0, fmt.Errorf("write on closed stream %d", s.id)
}
err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline))
if err != nil {
klog.V(7).Infof("Websocket setting write deadline failed %v", err)
return 0, err
}
// Message writer buffers the message data, so we don't need to do that ourselves.
// Just write id and the data as two separate writes to avoid allocating an intermediate buffer.
w, err := s.conn.NextWriter(gwebsocket.BinaryMessage)
if err != nil {
return 0, err
}
defer func() {
if w != nil {
w.Close()
}
}()
_, err = w.Write([]byte{s.id})
if err != nil {
return 0, err
}
n, err = w.Write(p)
if err != nil {
return n, err
}
err = w.Close()
w = nil
return n, err
}

// Close half-closes the stream, indicating this side is finished with the stream.
func (s *stream) Close() error {
klog.V(4).Infof("Close() on stream %d", s.id)
defer klog.V(4).Infof("Close() done on stream %d", s.id)
s.connWriteLock.Lock()
defer s.connWriteLock.Unlock()
if s.conn == nil {
return fmt.Errorf("Close() on already closed stream %d", s.id)
}
// Communicate the CLOSE stream signal to the other websocket endpoint.
err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id})
s.conn = nil
return err
}

func (s *stream) Reset() error {
klog.V(4).Infof("Reset() on stream %d", s.id)
defer klog.V(4).Infof("Reset() done on stream %d", s.id)
s.Close()
return s.writePipe.Close()
}

func (s *stream) Headers() http.Header {
return s.headers
}

func (s *stream) Identifier() uint32 {
return uint32(s.id)
}
// heartbeat encapsulates data necessary for the websocket ping/pong heartbeat. This
// heartbeat works by setting a read deadline on the websocket connection, then
// pushing this deadline into the future for every successful heartbeat. If the
// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call
// inside the "readDemuxLoop" will return an i/o error prompting a connection close
// and cleanup.
type heartbeat struct {
conn *gwebsocket.Conn
// period defines how often a "ping" heartbeat message is sent to the other endpoint
period time.Duration
// closing the "closer" channel will clean up the heartbeat timers
closer chan struct{}
// optional data to send with "ping" message
message []byte
// optionally received data message with "pong" message, same as sent with ping
pongMessage []byte
}

// newHeartbeat creates heartbeat structure encapsulating fields necessary to
// run the websocket connection ping/pong mechanism and sets up handlers on
// the websocket connection.
func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat {
h := &heartbeat{
conn: conn,
period: period,
closer: make(chan struct{}),
}
// Set up handler for receiving returned "pong" message from other endpoint
// by pushing the read deadline into the future. The "msg" received could
// be empty.
h.conn.SetPongHandler(func(msg string) error {
// Push the read deadline into the future.
klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg)
err := h.conn.SetReadDeadline(time.Now().Add(deadline))
if err != nil {
klog.Errorf("Websocket setting read deadline failed %v", err)
return err
}
if len(msg) > 0 {
h.pongMessage = []byte(msg)
}
return nil
})
// Set up handler to cleanup timers when this endpoint receives "Close" message.
closeHandler := h.conn.CloseHandler()
h.conn.SetCloseHandler(func(code int, text string) error {
close(h.closer)
return closeHandler(code, text)
})
return h
}

// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC
// this data sent with "ping" message should be returned in "pong" message.
func (h *heartbeat) setMessage(msg string) {
h.message = []byte(msg)
}

// start the heartbeat by setting up necessary handlers and looping by sending "ping"
// message every "period" until the "closer" channel is closed.
func (h *heartbeat) start() {
// Loop to continually send "ping" message through websocket connection every "period".
t := time.NewTicker(h.period)
defer t.Stop()
for {
select {
case <-h.closer:
klog.V(8).Infof("closed channel--returning")
return
case <-t.C:
// "WriteControl" does not need to be protected by a mutex. According to
// gorilla/websockets library docs: "The Close and WriteControl methods can
// be called concurrently with all other methods."
if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil {
klog.V(8).Infof("Websocket Ping succeeded")
} else {
klog.Errorf("Websocket Ping failed: %v", err)
if errors.Is(err, gwebsocket.ErrCloseSent) {
// we continue because c.conn.CloseChan will manage closing the connection already
continue
} else if e, ok := err.(net.Error); ok && e.Timeout() {
// Continue, in case this is a transient failure.
// c.conn.CloseChan above will tell us when the connection is
// actually closed.
// If Temporary function hadn't been deprecated, we would have used it.
// But most of temporary errors are timeout errors anyway.
continue
}
return
}
}
}
}
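Condensed to its essentials, the heartbeat above is the standard gorilla/websocket keepalive shape: every pong pushes the read deadline out, and a missed pong surfaces as an i/o timeout on the blocked read. A minimal sketch against a bare connection (the function name and one-second control deadline are our assumptions):

package example

import (
	"time"

	"github.com/gorilla/websocket"
)

func keepAlive(conn *websocket.Conn, period, deadline time.Duration, stop <-chan struct{}) error {
	if err := conn.SetReadDeadline(time.Now().Add(deadline)); err != nil {
		return err
	}
	// Each pong extends the read deadline; a missed pong times out the reader.
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(deadline))
	})
	t := time.NewTicker(period)
	defer t.Stop()
	for {
		select {
		case <-stop:
			return nil
		case <-t.C:
			// WriteControl is safe to call concurrently with other writers.
			if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second)); err != nil {
				return err
			}
		}
	}
}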
7
vendor/k8s.io/client-go/tools/watch/retrywatcher.go
generated
vendored
@@ -24,10 +24,9 @@ import (
"net/http"
"time"

"github.com/davecgh/go-spew/spew"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/dump"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@@ -191,7 +190,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
errObject := apierrors.FromObject(event.Object)
statusErr, ok := errObject.(*apierrors.StatusError)
if !ok {
klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object))
klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object)))
// Retry unknown errors
return false, 0
}
@@ -220,7 +219,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {

// Log here so we have a record of hitting the unexpected error
// and we can whitelist some error codes if we missed any that are expected.
klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object))
klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object)))

// Retry
return false, statusDelay
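The substitution above swaps go-spew for the in-tree dump package; dump.Pretty renders an arbitrary value as a readable string for logging. A tiny sketch (the struct is illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/dump"
)

func main() {
	type status struct {
		Code   int
		Reason string
	}
	// Prints a multi-line, human-readable rendering of the value.
	fmt.Println(dump.Pretty(status{Code: 410, Reason: "Gone"}))
}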
5
vendor/k8s.io/client-go/tools/watch/until.go
generated
vendored
@@ -101,8 +101,7 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions
// It guarantees you to see all events and in the order they happened.
// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing
//
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
//
// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
@@ -137,7 +136,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.

if precondition != nil {
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err())
}

done, err := precondition(indexer)
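The %v to %w change above is small but meaningful: wrapping with %w keeps the context error inspectable with errors.Is up the call stack. A minimal sketch:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// %w preserves the wrapped error for errors.Is; %v would flatten it to text.
	wrapped := fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err())
	fmt.Println(errors.Is(wrapped, context.Canceled)) // true
}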