Bump sigs.k8s.io/controller-runtime to v0.14.4 (#5507)

* Bump sigs.k8s.io/controller-runtime to v0.14.4

* Update gofmt
Author: hongming
Date: 2023-02-08 14:06:15 +08:00
Committed by: GitHub
Parent: 129e6fbec3
Commit: 1c49fcd57e
1404 changed files with 141422 additions and 47769 deletions

File: k8s.io/apiserver/pkg/storage/cacher/cacher.go

@@ -24,6 +24,8 @@ import (
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -31,19 +33,20 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
- endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
+ "k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/cacher/metrics"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
"k8s.io/client-go/tools/cache"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
utiltrace "k8s.io/utils/trace"
)
var (
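The import swaps above (utiltrace out; audit, otel attribute, and component-base tracing in) drive most of this file's changes: the cacher's hand-threaded utiltrace.Trace is replaced by an OpenTelemetry span carried on the request context. A minimal before/after sketch, using only calls visible in this diff (the "pods" attribute value is illustrative):

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"k8s.io/component-base/tracing"
	utiltrace "k8s.io/utils/trace"
)

// oldStyle: an in-process trace that logs (via klog) only if the whole
// operation exceeds the threshold; it must be passed around by hand.
func oldStyle() {
	trace := utiltrace.New("cacher list",
		utiltrace.Field{Key: "type", Value: "pods"})
	defer trace.LogIfLong(500 * time.Millisecond)
	trace.Step("Ready")
}

// newStyle: a span stored on the context, so deeper calls can attach
// events with tracing.SpanFromContext instead of threading a *Trace.
func newStyle(ctx context.Context) {
	ctx, span := tracing.Start(ctx, "cacher list",
		attribute.String("type", "pods"))
	defer span.End(500 * time.Millisecond) // logs if the span ran longer than this
	span.AddEvent("Ready")

	// ...somewhere further down the call stack:
	tracing.SpanFromContext(ctx).AddEvent("watchCache fresh enough")
}

func main() {
	oldStyle()
	newStyle(context.Background())
}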
@@ -69,6 +72,10 @@ type Config struct {
// An underlying storage.Versioner.
Versioner storage.Versioner
+ // The GroupResource the cacher is caching. Used for disambiguating *unstructured.Unstructured (CRDs) in logging
+ // and metrics.
+ GroupResource schema.GroupResource
// The Cache will be caching objects of a given Type and assumes that they
// are all stored under ResourcePrefix directory in the underlying database.
ResourcePrefix string
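The new GroupResource field replaces the reflected Go type as the cacher's identity in logs and metric labels; its String() form is stable even for *unstructured.Unstructured objects, which all share one Go type. A small sketch of what that identity looks like:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Core resources have an empty group, so String() is just the
	// resource name.
	pods := schema.GroupResource{Resource: "pods"}
	fmt.Println(pods.String()) // "pods"

	// Grouped resources (including CRDs) render as "resource.group",
	// which is what disambiguates CRDs in logs and metric labels.
	deploys := schema.GroupResource{Group: "apps", Resource: "deployments"}
	fmt.Println(deploys.String()) // "deployments.apps"
}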
@@ -146,13 +153,13 @@ func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool
}
}
- func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) {
+ func (i *indexedWatchers) terminateAll(groupResource schema.GroupResource, done func(*cacheWatcher)) {
// note that we don't have to call the setDrainInputBufferLocked method on the watchers
// because we take advantage of the default value - stop immediately
// also, watchers that have already had their draining strategy set
// are no longer available (they were removed from the allWatchers and the valueWatchers maps)
if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 {
klog.Warningf("Terminating all watchers from cacher %v", objectType)
klog.Warningf("Terminating all watchers from cacher %v", groupResource)
}
i.allWatchers.terminateAll(done)
for _, watchers := range i.valueWatchers {
@@ -258,6 +265,8 @@ type Cacher struct {
// Expected type of objects in the underlying cache.
objectType reflect.Type
+ // Used for logging, to disambiguate *unstructured.Unstructured (CRDs)
+ groupResource schema.GroupResource
// "sliding window" of recent changes of objects and the current state.
watchCache *watchCache
@@ -347,6 +356,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
ready: newReady(),
storage: config.Storage,
objectType: objType,
+ groupResource: config.GroupResource,
versioner: config.Versioner,
newFunc: config.NewFunc,
indexedTrigger: indexedTrigger,
@@ -377,7 +387,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
}
watchCache := newWatchCache(
- config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType)
+ config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource)
listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
@@ -422,8 +432,8 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
c.watchCache.SetOnReplace(func() {
successfulList = true
c.ready.set(true)
klog.V(1).Infof("cacher (%v): initialized", c.objectType.String())
metrics.WatchCacheInitializations.WithLabelValues(c.objectType.String()).Inc()
klog.V(1).Infof("cacher (%v): initialized", c.groupResource.String())
metrics.WatchCacheInitializations.WithLabelValues(c.groupResource.String()).Inc()
})
defer func() {
if successfulList {
@@ -437,7 +447,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
// Also note that startCaching is called in a loop, so there's no need
// to have another loop here.
if err := c.reflector.ListAndWatch(stopChannel); err != nil {
klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err)
klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.groupResource.String(), err)
}
}
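The comment above notes that startCaching needs no retry loop of its own because the caller already runs it in one. A standalone sketch of that outer wiring (the wait.Until arrangement is an assumption about the surrounding code, which this hunk doesn't show):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// startCaching stands in for the real method: it blocks inside
// reflector.ListAndWatch until the watch breaks or stopCh closes,
// then returns so it can be restarted with a fresh LIST.
func startCaching(stopCh <-chan struct{}) {
	fmt.Println("list+watch cycle")
	<-stopCh // pretend the watch runs until stopped
}

func main() {
	stopCh := make(chan struct{})
	// The outer loop the comment refers to: re-run startCaching one
	// second after each return, until stopCh is closed.
	go wait.Until(func() { startCaching(stopCh) }, time.Second, stopCh)

	time.Sleep(100 * time.Millisecond)
	close(stopCh)
}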
@@ -508,7 +518,16 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
// given that memory allocation may trigger GC and block the thread.
// Also note that emptyFunc is a placeholder, until we are able
// to compute the watcher.forget function (which has to happen under lock).
- watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType, identifier)
+ watcher := newCacheWatcher(
+ chanSize,
+ filterWithAttrsFunction(key, pred),
+ emptyFunc,
+ c.versioner,
+ deadline,
+ pred.AllowWatchBookmarks,
+ c.groupResource,
+ identifier,
+ )
// We explicitly use thread unsafe version and do locking ourself to ensure that
// no new events will be processed in the meantime. The watchCache will be unlocked
@@ -576,7 +595,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
return err
}
- obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil)
+ obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(ctx, getRV, key)
if err != nil {
return err
}
@@ -615,9 +634,9 @@ func shouldDelegateList(opts storage.ListOptions) bool {
return resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact
}
- func (c *Cacher) listItems(listRV uint64, key string, pred storage.SelectionPredicate, trace *utiltrace.Trace, recursive bool) ([]interface{}, uint64, string, error) {
+ func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred storage.SelectionPredicate, recursive bool) ([]interface{}, uint64, string, error) {
if !recursive {
- obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace)
+ obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(ctx, listRV, key)
if err != nil {
return nil, 0, "", err
}
@@ -626,7 +645,7 @@ func (c *Cacher) listItems(listRV uint64, key string, pred storage.SelectionPred
}
return nil, readResourceVersion, "", nil
}
- return c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace)
+ return c.watchCache.WaitUntilFreshAndList(ctx, listRV, pred.MatcherIndex())
}
// GetList implements storage.Interface
@@ -652,15 +671,15 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
return c.storage.GetList(ctx, key, opts, listObj)
}
- trace := utiltrace.New("cacher list",
- utiltrace.Field{"audit-id", endpointsrequest.GetAuditIDTruncated(ctx)},
- utiltrace.Field{Key: "type", Value: c.objectType.String()})
- defer trace.LogIfLong(500 * time.Millisecond)
+ ctx, span := tracing.Start(ctx, "cacher list",
+ attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)),
+ attribute.Stringer("type", c.groupResource))
+ defer span.End(500 * time.Millisecond)
if err := c.ready.wait(); err != nil {
return errors.NewServiceUnavailable(err.Error())
}
trace.Step("Ready")
span.AddEvent("Ready")
// List elements with at least 'listRV' from cache.
listPtr, err := meta.GetItemsPtr(listObj)
@@ -676,16 +695,16 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
}
filter := filterWithAttrsFunction(key, pred)
- objs, readResourceVersion, indexUsed, err := c.listItems(listRV, key, pred, trace, recursive)
+ objs, readResourceVersion, indexUsed, err := c.listItems(ctx, listRV, key, pred, recursive)
if err != nil {
return err
}
trace.Step("Listed items from cache", utiltrace.Field{Key: "count", Value: len(objs)})
span.AddEvent("Listed items from cache", attribute.Int("count", len(objs)))
if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() {
// Resize the slice appropriately, since we already know that none
// of the elements will be filtered out.
listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs)))
trace.Step("Resized result")
span.AddEvent("Resized result")
}
for _, obj := range objs {
elem, ok := obj.(*storeElement)
@@ -696,7 +715,7 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
}
}
trace.Step("Filtered items", utiltrace.Field{Key: "count", Value: listVal.Len()})
span.AddEvent("Filtered items", attribute.Int("count", listVal.Len()))
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
return err
@@ -757,7 +776,7 @@ func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bo
func (c *Cacher) processEvent(event *watchCacheEvent) {
if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) {
// Monitor if this gets backed up, and how much.
klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen)
klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.groupResource.String(), curLen)
}
c.incoming <- *event
}
@@ -788,7 +807,7 @@ func (c *Cacher) dispatchEvents() {
c.dispatchEvent(&event)
}
lastProcessedResourceVersion = event.ResourceVersion
- metrics.EventsCounter.WithLabelValues(c.objectType.String()).Inc()
+ metrics.EventsCounter.WithLabelValues(c.groupResource.String()).Inc()
case <-bookmarkTimer.C():
bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
// Never send a bookmark event if we did not see an event here, this is fine
@@ -1008,7 +1027,7 @@ func (c *Cacher) finishDispatching() {
func (c *Cacher) terminateAllWatchers() {
c.Lock()
defer c.Unlock()
- c.watchers.terminateAll(c.objectType, c.stopWatcherLocked)
+ c.watchers.terminateAll(c.groupResource, c.stopWatcherLocked)
}
func (c *Cacher) stopWatcherLocked(watcher *cacheWatcher) {
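Every metric touched in this file keeps its name and only changes its label value: from the reflected Go type (something like "*example.Pod") to GroupResource.String(). A standalone client_golang sketch of the same labeling pattern (the metric name is hypothetical; the real counters live in k8s.io/apiserver/pkg/storage/cacher/metrics and are built on component-base wrappers):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// eventsCounter stands in for counters like EventsCounter and
// TerminatedWatchersCounter: one time series per resource, keyed by
// the label this commit derives from GroupResource instead of the type.
var eventsCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_watch_cache_events_total", // hypothetical name
		Help: "Number of events dispatched per resource.",
	},
	[]string{"resource"},
)

func main() {
	prometheus.MustRegister(eventsCounter)
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	// The label is now the stable API identity "deployments.apps"
	// rather than a Go type name.
	eventsCounter.WithLabelValues(gr.String()).Inc()
	fmt.Println("recorded event for", gr.String())
}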
@@ -1176,8 +1195,7 @@ type cacheWatcher struct {
// save it here to send bookmark events before that.
deadline time.Time
allowWatchBookmarks bool
- // Object type of the cache watcher interests
- objectType reflect.Type
+ groupResource schema.GroupResource
// human-readable identifier that helps associate a cacheWatcher
// instance with a request
@@ -1188,7 +1206,16 @@ type cacheWatcher struct {
drainInputBuffer bool
}
- func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(bool), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type, identifier string) *cacheWatcher {
+ func newCacheWatcher(
+ chanSize int,
+ filter filterWithAttrsFunc,
+ forget func(bool),
+ versioner storage.Versioner,
+ deadline time.Time,
+ allowWatchBookmarks bool,
+ groupResource schema.GroupResource,
+ identifier string,
+ ) *cacheWatcher {
return &cacheWatcher{
input: make(chan *watchCacheEvent, chanSize),
result: make(chan watch.Event, chanSize),
@@ -1199,7 +1226,7 @@ func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(bool)
versioner: versioner,
deadline: deadline,
allowWatchBookmarks: allowWatchBookmarks,
- objectType: objectType,
+ groupResource: groupResource,
identifier: identifier,
}
}
@@ -1256,8 +1283,8 @@ func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
// This means that we couldn't send event to that watcher.
// Since we don't want to block on it infinitely,
// we simply terminate it.
klog.V(1).Infof("Forcing %v watcher close due to unresponsiveness: %v. len(c.input) = %v, len(c.result) = %v", c.objectType.String(), c.identifier, len(c.input), len(c.result))
metrics.TerminatedWatchersCounter.WithLabelValues(c.objectType.String()).Inc()
klog.V(1).Infof("Forcing %v watcher close due to unresponsiveness: %v. len(c.input) = %v, len(c.result) = %v", c.groupResource.String(), c.identifier, len(c.input), len(c.result))
metrics.TerminatedWatchersCounter.WithLabelValues(c.groupResource.String()).Inc()
c.forget(false)
}
@@ -1455,13 +1482,12 @@ func (c *cacheWatcher) processInterval(ctx context.Context, cacheInterval *watch
initEventCount++
}
- objType := c.objectType.String()
if initEventCount > 0 {
- metrics.InitCounter.WithLabelValues(objType).Add(float64(initEventCount))
+ metrics.InitCounter.WithLabelValues(c.groupResource.String()).Add(float64(initEventCount))
}
processingTime := time.Since(startTime)
if processingTime > initProcessThreshold {
klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", initEventCount, objType, c.identifier, processingTime)
klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", initEventCount, c.groupResource, c.identifier, processingTime)
}
c.process(ctx, resourceVersion)

File: k8s.io/apiserver/pkg/storage/cacher/watch_cache.go

@@ -17,9 +17,9 @@ limitations under the License.
package cacher
import (
"context"
"fmt"
"math"
"reflect"
"sort"
"sync"
"time"
@@ -28,13 +28,14 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/cacher/metrics"
"k8s.io/client-go/tools/cache"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
- utiltrace "k8s.io/utils/trace"
)
const (
@@ -189,8 +190,8 @@ type watchCache struct {
// An underlying storage.Versioner.
versioner storage.Versioner
- // cacher's objectType.
- objectType reflect.Type
+ // cacher's group resource
+ groupResource schema.GroupResource
// For testing cache interval invalidation.
indexValidator indexValidator
@@ -203,7 +204,7 @@ func newWatchCache(
versioner storage.Versioner,
indexers *cache.Indexers,
clock clock.Clock,
- objectType reflect.Type) *watchCache {
+ groupResource schema.GroupResource) *watchCache {
wc := &watchCache{
capacity: defaultLowerBoundCapacity,
keyFunc: keyFunc,
@@ -219,10 +220,9 @@ func newWatchCache(
eventHandler: eventHandler,
clock: clock,
versioner: versioner,
- objectType: objectType,
+ groupResource: groupResource,
}
- objType := objectType.String()
- metrics.WatchCacheCapacity.WithLabelValues(objType).Set(float64(wc.capacity))
+ metrics.WatchCacheCapacity.WithLabelValues(groupResource.String()).Set(float64(wc.capacity))
wc.cond = sync.NewCond(wc.RLocker())
wc.indexValidator = wc.isIndexValidLocked
@@ -387,7 +387,7 @@ func (w *watchCache) doCacheResizeLocked(capacity int) {
newCache[i%capacity] = w.cache[i%w.capacity]
}
w.cache = newCache
- metrics.RecordsWatchCacheCapacityChange(w.objectType.String(), w.capacity, capacity)
+ metrics.RecordsWatchCacheCapacityChange(w.groupResource.String(), w.capacity, capacity)
w.capacity = capacity
}
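doCacheResizeLocked above re-slots each retained event at its global index modulo the new capacity, so index-based lookups keep working across a resize. A simplified, self-contained sketch of that pattern (string payloads instead of *watchCacheEvent):

package main

import "fmt"

// resizeRing mirrors the copy in doCacheResizeLocked: events live in a
// ring buffer indexed by their global sequence number modulo capacity,
// so a resize re-slots each retained event at i % newCapacity.
func resizeRing(cache []string, startIndex, endIndex, newCapacity int) []string {
	newCache := make([]string, newCapacity)
	oldCapacity := len(cache)
	if endIndex-startIndex > newCapacity {
		// When shrinking, keep only the newest events that fit,
		// as the real code does by advancing startIndex.
		startIndex = endIndex - newCapacity
	}
	for i := startIndex; i < endIndex; i++ {
		newCache[i%newCapacity] = cache[i%oldCapacity]
	}
	return newCache
}

func main() {
	// Ring of capacity 4 holding global events 3..6 at slots i%4.
	cache := []string{"e4", "e5", "e6", "e3"}
	grown := resizeRing(cache, 3, 7, 8)
	fmt.Println(grown) // e3..e6 land at slots 3..6 of the larger ring
}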
@@ -425,7 +425,7 @@ func (w *watchCache) List() []interface{} {
// waitUntilFreshAndBlock waits until the cache is at least as fresh as the given <resourceVersion>.
// NOTE: This function acquires the lock and doesn't release it.
// You HAVE TO explicitly call w.RUnlock() after this function.
- func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utiltrace.Trace) error {
+ func (w *watchCache) waitUntilFreshAndBlock(ctx context.Context, resourceVersion uint64) error {
startTime := w.clock.Now()
// In case resourceVersion is 0, we accept arbitrarily stale result.
@@ -450,9 +450,8 @@ func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utilt
}
w.RLock()
- if trace != nil {
- trace.Step("watchCache locked acquired")
- }
+ span := tracing.SpanFromContext(ctx)
+ span.AddEvent("watchCache locked acquired")
for w.resourceVersion < resourceVersion {
if w.clock.Since(startTime) >= blockTimeout {
// Request that the client retry after 'resourceVersionTooHighRetrySeconds' seconds.
@@ -460,16 +459,14 @@ func (w *watchCache) waitUntilFreshAndBlock(resourceVersion uint64, trace *utilt
}
w.cond.Wait()
}
- if trace != nil {
- trace.Step("watchCache fresh enough")
- }
+ span.AddEvent("watchCache fresh enough")
return nil
}
// WaitUntilFreshAndList returns a list of pointers to `storeElement` objects along
// with their ResourceVersion and the name of the index, if any, that was used.
- func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues []storage.MatchValue, trace *utiltrace.Trace) ([]interface{}, uint64, string, error) {
- err := w.waitUntilFreshAndBlock(resourceVersion, trace)
+ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) {
+ err := w.waitUntilFreshAndBlock(ctx, resourceVersion)
defer w.RUnlock()
if err != nil {
return nil, 0, "", err
@@ -488,8 +485,8 @@ func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64, matchValues [
}
// WaitUntilFreshAndGet returns a pointer to a <storeElement> object.
- func (w *watchCache) WaitUntilFreshAndGet(resourceVersion uint64, key string, trace *utiltrace.Trace) (interface{}, bool, uint64, error) {
- err := w.waitUntilFreshAndBlock(resourceVersion, trace)
+ func (w *watchCache) WaitUntilFreshAndGet(ctx context.Context, resourceVersion uint64, key string) (interface{}, bool, uint64, error) {
+ err := w.waitUntilFreshAndBlock(ctx, resourceVersion)
defer w.RUnlock()
if err != nil {
return nil, false, 0, err
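waitUntilFreshAndBlock's contract is unusual: it returns with the read lock still held, and both callers above defer w.RUnlock() even on error. A simplified sketch of the underlying wait-for-freshness pattern (names simplified; the real code also runs a goroutine that periodically broadcasts so blocked waiters can notice the timeout):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// freshness mirrors the watchCache pattern: readers block on a condition
// variable until the cached resourceVersion catches up to what they need.
type freshness struct {
	sync.RWMutex
	cond            *sync.Cond
	resourceVersion uint64
}

func newFreshness() *freshness {
	f := &freshness{}
	f.cond = sync.NewCond(f.RLocker()) // waiters hold only the read lock
	return f
}

// waitUntilFresh acquires the read lock and returns with it still held,
// like waitUntilFreshAndBlock: the caller must RUnlock().
func (f *freshness) waitUntilFresh(rv uint64, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	f.RLock()
	for f.resourceVersion < rv {
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for fresh cache")
		}
		f.cond.Wait() // releases the read lock while blocked
	}
	return nil
}

// advance is what event processing does: bump the version and wake waiters.
func (f *freshness) advance(rv uint64) {
	f.Lock()
	f.resourceVersion = rv
	f.Unlock()
	f.cond.Broadcast()
}

func main() {
	f := newFreshness()
	go func() {
		time.Sleep(10 * time.Millisecond)
		f.advance(42)
	}()
	err := f.waitUntilFresh(42, time.Second)
	defer f.RUnlock()
	fmt.Println("fresh:", err == nil)
}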