update dependencies (#6267)

Signed-off-by: hongming <coder.scala@gmail.com>
hongming
2024-11-06 10:27:06 +08:00
committed by GitHub
parent faf255a084
commit cfebd96a1f
4263 changed files with 341374 additions and 132036 deletions

View File

@@ -29,16 +29,14 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/client-go/util/workqueue"
-    "sigs.k8s.io/controller-runtime/pkg/handler"
     ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
     logf "sigs.k8s.io/controller-runtime/pkg/log"
-    "sigs.k8s.io/controller-runtime/pkg/predicate"
     "sigs.k8s.io/controller-runtime/pkg/reconcile"
     "sigs.k8s.io/controller-runtime/pkg/source"
 )

 // Controller implements controller.Controller.
-type Controller struct {
+type Controller[request comparable] struct {
     // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
     Name string
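
The comparable type parameter is what lets a controller reconcile request types other than reconcile.Request. As a point of reference, a minimal sketch of a custom request type satisfying the TypedReconciler interface introduced alongside this change (ClusterRequest and clusterReconciler are illustrative names, not part of this diff):

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ClusterRequest is a hypothetical multi-cluster request; any comparable type qualifies.
type ClusterRequest struct {
    ClusterName string
    Namespace   string
    Name        string
}

type clusterReconciler struct{}

// Reconcile satisfies reconcile.TypedReconciler[ClusterRequest].
func (r *clusterReconciler) Reconcile(ctx context.Context, req ClusterRequest) (reconcile.Result, error) {
    // Look up the object named by req in the cluster req.ClusterName and converge it.
    return reconcile.Result{}, nil
}

var _ reconcile.TypedReconciler[ClusterRequest] = &clusterReconciler{}
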
@@ -48,16 +46,19 @@ type Controller struct {
     // Reconciler is a function that can be called at any time with the Name / Namespace of an object and
     // ensures that the state of the system matches the state specified in the object.
     // Defaults to the DefaultReconcileFunc.
-    Do reconcile.Reconciler
+    Do reconcile.TypedReconciler[request]

-    // MakeQueue constructs the queue for this controller once the controller is ready to start.
-    // This exists because the standard Kubernetes workqueues start themselves immediately, which
+    // RateLimiter is used to limit how frequently requests may be queued into the work queue.
+    RateLimiter workqueue.TypedRateLimiter[request]

+    // NewQueue constructs the queue for this controller once the controller is ready to start.
+    // This is a func because the standard Kubernetes work queues start themselves immediately, which
     // leads to goroutine leaks if something calls controller.New repeatedly.
-    MakeQueue func() workqueue.RateLimitingInterface
+    NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request]

     // Queue is a listeningQueue that listens for events from Informers and adds object keys to
     // the Queue for processing.
-    Queue workqueue.RateLimitingInterface
+    Queue workqueue.TypedRateLimitingInterface[request]

     // mu is used to synchronize Controller setup
     mu sync.Mutex
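
A sketch of a function matching the new NewQueue signature, built from client-go's generic work queue constructors (these typed constructors ship with recent client-go releases; newQueue is an illustrative name):

import (
    "k8s.io/client-go/util/workqueue"

    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// newQueue matches the NewQueue field for the common request type reconcile.Request.
func newQueue(controllerName string, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] {
    return workqueue.NewTypedRateLimitingQueueWithConfig(rateLimiter,
        workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{Name: controllerName})
}

A caller would typically pass workqueue.DefaultTypedControllerRateLimiter[reconcile.Request]() as the rate limiter.
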
@@ -77,35 +78,31 @@ type Controller struct {
     CacheSyncTimeout time.Duration

     // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
-    startWatches []watchDescription
+    startWatches []source.TypedSource[request]

     // LogConstructor is used to construct a logger to then log messages to users during reconciliation,
     // or for example when a watch is started.
     // Note: LogConstructor has to be able to handle nil requests as we are also using it
     // outside the context of a reconciliation.
-    LogConstructor func(request *reconcile.Request) logr.Logger
+    LogConstructor func(request *request) logr.Logger

     // RecoverPanic indicates whether the panic caused by reconcile should be recovered.
+    // Defaults to true.
     RecoverPanic *bool

     // LeaderElected indicates whether the controller is leader elected or always running.
     LeaderElected *bool
 }

-// watchDescription contains all the information necessary to start a watch.
-type watchDescription struct {
-    src        source.Source
-    handler    handler.EventHandler
-    predicates []predicate.Predicate
-}

 // Reconcile implements reconcile.Reconciler.
-func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
+func (c *Controller[request]) Reconcile(ctx context.Context, req request) (_ reconcile.Result, err error) {
     defer func() {
         if r := recover(); r != nil {
+            ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Inc()

-            if c.RecoverPanic != nil && *c.RecoverPanic {
+            if c.RecoverPanic == nil || *c.RecoverPanic {
                 for _, fn := range utilruntime.PanicHandlers {
-                    fn(r)
+                    fn(ctx, r)
                 }
                 err = fmt.Errorf("panic: %v [recovered]", r)
                 return
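
The fn(ctx, r) call tracks the apimachinery change that added a context argument to panic handlers. A sketch of registering an extra handler under that signature (the handler body is illustrative):

import (
    "context"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func init() {
    // Handlers in this slice run for every recovered panic; ctx carries
    // whatever request-scoped values the caller passed through.
    utilruntime.PanicHandlers = append(utilruntime.PanicHandlers,
        func(ctx context.Context, r interface{}) {
            // e.g. emit a structured log or a metric for the panic value r.
        })
}
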
@@ -120,7 +117,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ re
 }

 // Watch implements controller.Controller.
-func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
+func (c *Controller[request]) Watch(src source.TypedSource[request]) error {
     c.mu.Lock()
     defer c.mu.Unlock()

@@ -128,16 +125,16 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
     //
     // These watches are going to be held on the controller struct until the manager or user calls Start(...).
     if !c.Started {
-        c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct})
+        c.startWatches = append(c.startWatches, src)
         return nil
     }

     c.LogConstructor(nil).Info("Starting EventSource", "source", src)
-    return src.Start(c.ctx, evthdler, c.Queue, prct...)
+    return src.Start(c.ctx, c.Queue)
 }

 // NeedLeaderElection implements the manager.LeaderElectionRunnable interface.
-func (c *Controller) NeedLeaderElection() bool {
+func (c *Controller[request]) NeedLeaderElection() bool {
     if c.LeaderElected == nil {
         return true
     }
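
Since the handler and predicates now travel inside the source, Watch takes a single self-describing argument. A sketch of a caller using the public source package (assuming the generics-era source.Kind that accepts the handler directly):

import (
    corev1 "k8s.io/api/core/v1"

    "sigs.k8s.io/controller-runtime/pkg/cache"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/source"
)

func watchPods(c controller.Controller, ca cache.Cache) error {
    // The source bundles the object type, the cache, and the event handler;
    // Watch no longer takes separate handler/predicate arguments.
    src := source.Kind(ca, &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{})
    return c.Watch(src)
}
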
@@ -145,7 +142,7 @@ func (c *Controller) NeedLeaderElection() bool {
 }

 // Start implements controller.Controller.
-func (c *Controller) Start(ctx context.Context) error {
+func (c *Controller[request]) Start(ctx context.Context) error {
     // use an IIFE to get proper lock handling
     // but lock outside to get proper handling of the queue shutdown
     c.mu.Lock()
@@ -158,7 +155,7 @@ func (c *Controller) Start(ctx context.Context) error {
         // Set the internal context.
         c.ctx = ctx

-        c.Queue = c.MakeQueue()
+        c.Queue = c.NewQueue(c.Name, c.RateLimiter)
         go func() {
             <-ctx.Done()
             c.Queue.ShutDown()
@@ -175,9 +172,9 @@ func (c *Controller) Start(ctx context.Context) error {
         // caches to sync so that they have a chance to register their intended
         // caches.
         for _, watch := range c.startWatches {
-            c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
+            c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch))

-            if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
+            if err := watch.Start(ctx, c.Queue); err != nil {
                 return err
             }
         }
@@ -186,7 +183,7 @@ func (c *Controller) Start(ctx context.Context) error {
         c.LogConstructor(nil).Info("Starting Controller")

         for _, watch := range c.startWatches {
-            syncingSource, ok := watch.src.(source.SyncingSource)
+            syncingSource, ok := watch.(source.TypedSyncingSource[request])
             if !ok {
                 continue
             }
@@ -245,7 +242,7 @@ func (c *Controller) Start(ctx context.Context) error {
 // processNextWorkItem will read a single work item off the workqueue and
 // attempt to process it, by calling the reconcileHandler.
-func (c *Controller) processNextWorkItem(ctx context.Context) bool {
+func (c *Controller[request]) processNextWorkItem(ctx context.Context) bool {
     obj, shutdown := c.Queue.Get()
     if shutdown {
         // Stop working
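
For context, the consume side of the typed queue keeps the classic Get/Done discipline but no longer needs a type assertion on the item; a condensed sketch for reconcile.Request queues:

import (
    "k8s.io/client-go/util/workqueue"

    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func drainOne(q workqueue.TypedRateLimitingInterface[reconcile.Request]) bool {
    req, shutdown := q.Get() // req is already a reconcile.Request
    if shutdown {
        return false
    }
    // Done must be called for every item so parallel workers never
    // process the same request concurrently.
    defer q.Done(req)

    // ... reconcile req, then Forget on success or AddRateLimited on error ...
    return true
}
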
@@ -274,35 +271,25 @@ const (
     labelSuccess = "success"
 )

-func (c *Controller) initMetrics() {
-    ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
-    ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
+func (c *Controller[request]) initMetrics() {
     ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0)
     ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0)
     ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0)
     ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0)
+    ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
     ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Add(0)
+    ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Add(0)
     ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles))
+    ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
 }
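
The Add(0)/Set(0) calls pre-create every label combination so each series is exported as zero from the first scrape instead of materializing only after its first event. The same idiom with a standalone Prometheus counter vec (names are illustrative):

import "github.com/prometheus/client_golang/prometheus"

var exampleReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
    Name: "example_reconcile_total", // illustrative metric name
    Help: "Total reconciliations per controller and result.",
}, []string{"controller", "result"})

func initExampleMetrics(controllerName string) {
    // Touch each label set once; Add(0) keeps the value at zero but makes
    // the time series visible to Prometheus immediately.
    for _, result := range []string{"error", "requeue_after", "requeue", "success"} {
        exampleReconcileTotal.WithLabelValues(controllerName, result).Add(0)
    }
}
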
-func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
+func (c *Controller[request]) reconcileHandler(ctx context.Context, req request) {
     // Update metrics after processing each item
     reconcileStartTS := time.Now()
     defer func() {
         c.updateMetrics(time.Since(reconcileStartTS))
     }()

-    // Make sure that the object is a valid request.
-    req, ok := obj.(reconcile.Request)
-    if !ok {
-        // As the item in the workqueue is actually invalid, we call
-        // Forget here else we'd go into a loop of attempting to
-        // process a work item that is invalid.
-        c.Queue.Forget(obj)
-        c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
-        // Return true, don't take a break
-        return
-    }

     log := c.LogConstructor(&req)
     reconcileID := uuid.NewUUID()
@@ -333,7 +320,7 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
         // along with a non-nil error, but this is intended: we need to drive
         // to stable reconcile loops before queueing due to result.RequeueAfter.
-        c.Queue.Forget(obj)
+        c.Queue.Forget(req)
         c.Queue.AddAfter(req, result.RequeueAfter)
         ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
     case result.Requeue:
@@ -344,18 +331,18 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
         log.V(5).Info("Reconcile successful")
         // Finally, if no error occurs we Forget this item so it does not
         // get queued again until another change happens.
-        c.Queue.Forget(obj)
+        c.Queue.Forget(req)
         ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc()
     }
 }
 // GetLogger returns this controller's logger.
-func (c *Controller) GetLogger() logr.Logger {
+func (c *Controller[request]) GetLogger() logr.Logger {
     return c.LogConstructor(nil)
 }

 // updateMetrics updates prometheus metrics within the controller.
-func (c *Controller) updateMetrics(reconcileTime time.Duration) {
+func (c *Controller[request]) updateMetrics(reconcileTime time.Duration) {
     ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds())
 }
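
The reconcileHandler switch above maps a reconciler's return value onto these queue operations; from a reconciler's point of view the contract is unchanged. A minimal sketch:

import (
    "context"
    "time"

    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func exampleReconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
    // A non-nil error re-enqueues req with rate-limited backoff (labelError).
    // RequeueAfter > 0 schedules a retry at a fixed delay (labelRequeueAfter).
    // Requeue: true re-enqueues immediately with backoff (labelRequeue).
    // A zero Result with a nil error forgets the item (labelSuccess).
    return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
}
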

View File

@@ -46,6 +46,13 @@ var (
         Help: "Total number of terminal reconciliation errors per controller",
     }, []string{"controller"})

+    // ReconcilePanics is a prometheus counter metric which holds the total
+    // number of panics from the Reconciler.
+    ReconcilePanics = prometheus.NewCounterVec(prometheus.CounterOpts{
+        Name: "controller_runtime_reconcile_panics_total",
+        Help: "Total number of reconciliation panics per controller",
+    }, []string{"controller"})

     // ReconcileTime is a prometheus metric which keeps track of the duration
     // of reconciliations.
     ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{

@@ -75,6 +82,7 @@ func init() {
         ReconcileTotal,
         ReconcileErrors,
         TerminalReconcileErrors,
+        ReconcilePanics,
         ReconcileTime,
         WorkerCount,
         ActiveWorkers,
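
A collector only shows up on the manager's /metrics endpoint once it is registered with this shared registry, which is what the init block above does for ReconcilePanics. Downstream code can register its own collectors the same way (the counter below is illustrative):

import (
    "github.com/prometheus/client_golang/prometheus"

    "sigs.k8s.io/controller-runtime/pkg/metrics"
)

var exampleEvents = prometheus.NewCounter(prometheus.CounterOpts{
    Name: "example_events_total", // illustrative
    Help: "Example counter exposed through the controller-runtime registry.",
})

func init() {
    // metrics.Registry is the registry served by the manager's metrics endpoint.
    metrics.Registry.MustRegister(exampleEvents)
}
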

View File

@@ -33,8 +33,12 @@ import (
var log = logf.RuntimeLog.WithName("source").WithName("EventHandler")
// NewEventHandler creates a new EventHandler.
func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler {
return &EventHandler{
func NewEventHandler[object client.Object, request comparable](
ctx context.Context,
queue workqueue.TypedRateLimitingInterface[request],
handler handler.TypedEventHandler[object, request],
predicates []predicate.TypedPredicate[object]) *EventHandler[object, request] {
return &EventHandler[object, request]{
ctx: ctx,
handler: handler,
queue: queue,
@@ -43,19 +47,19 @@ func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface,
 }

 // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface.
-type EventHandler struct {
+type EventHandler[object client.Object, request comparable] struct {
     // ctx stores the context that created the event handler
     // that is used to propagate cancellation signals to each handler function.
     ctx context.Context

-    handler    handler.EventHandler
-    queue      workqueue.RateLimitingInterface
-    predicates []predicate.Predicate
+    handler    handler.TypedEventHandler[object, request]
+    queue      workqueue.TypedRateLimitingInterface[request]
+    predicates []predicate.TypedPredicate[object]
 }
 // HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs
 // TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27
-func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs {
+func (e *EventHandler[object, request]) HandlerFuncs() cache.ResourceEventHandlerFuncs {
     return cache.ResourceEventHandlerFuncs{
         AddFunc:    e.OnAdd,
         UpdateFunc: e.OnUpdate,
@@ -64,11 +68,11 @@ func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs {
 }

 // OnAdd creates CreateEvent and calls Create on EventHandler.
-func (e *EventHandler) OnAdd(obj interface{}) {
-    c := event.CreateEvent{}
+func (e *EventHandler[object, request]) OnAdd(obj interface{}) {
+    c := event.TypedCreateEvent[object]{}

     // Pull Object out of the object
-    if o, ok := obj.(client.Object); ok {
+    if o, ok := obj.(object); ok {
         c.Object = o
     } else {
         log.Error(nil, "OnAdd missing Object",
@@ -89,10 +93,10 @@ func (e *EventHandler) OnAdd(obj interface{}) {
 }

 // OnUpdate creates UpdateEvent and calls Update on EventHandler.
-func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
-    u := event.UpdateEvent{}
+func (e *EventHandler[object, request]) OnUpdate(oldObj, newObj interface{}) {
+    u := event.TypedUpdateEvent[object]{}

-    if o, ok := oldObj.(client.Object); ok {
+    if o, ok := oldObj.(object); ok {
         u.ObjectOld = o
     } else {
         log.Error(nil, "OnUpdate missing ObjectOld",

@@ -101,7 +105,7 @@ func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
     }

     // Pull Object out of the object
-    if o, ok := newObj.(client.Object); ok {
+    if o, ok := newObj.(object); ok {
         u.ObjectNew = o
     } else {
         log.Error(nil, "OnUpdate missing ObjectNew",
@@ -122,8 +126,8 @@ func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) {
 }

 // OnDelete creates DeleteEvent and calls Delete on EventHandler.
-func (e *EventHandler) OnDelete(obj interface{}) {
-    d := event.DeleteEvent{}
+func (e *EventHandler[object, request]) OnDelete(obj interface{}) {
+    d := event.TypedDeleteEvent[object]{}

     // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a
     // DeleteFinalStateUnknown struct, so the object needs to be pulled out.

@@ -149,7 +153,7 @@ func (e *EventHandler) OnDelete(obj interface{}) {
     }

     // Pull Object out of the object
-    if o, ok := obj.(client.Object); ok {
+    if o, ok := obj.(object); ok {
         d.Object = o
     } else {
         log.Error(nil, "OnDelete missing Object",
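
The generic EventHandler above delivers already-typed events, so a downstream handler never casts from interface{}. A sketch of a typed handler assembled from funcs (assuming handler.TypedFuncs from the same release; the enqueue logic is illustrative):

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/util/workqueue"

    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// podHandler receives typed Pod events directly.
var podHandler = handler.TypedFuncs[*corev1.Pod, reconcile.Request]{
    CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*corev1.Pod],
        q workqueue.TypedRateLimitingInterface[reconcile.Request]) {
        q.Add(reconcile.Request{NamespacedName: client.ObjectKeyFromObject(e.Object)})
    },
}
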

View File

@@ -4,12 +4,14 @@ import (
     "context"
     "errors"
     "fmt"
+    "reflect"
     "time"

     "k8s.io/apimachinery/pkg/api/meta"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/util/workqueue"

     "sigs.k8s.io/controller-runtime/pkg/cache"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/handler"
@@ -17,34 +19,40 @@ import (
 )

 // Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
-type Kind struct {
+type Kind[object client.Object, request comparable] struct {
     // Type is the type of object to watch. e.g. &v1.Pod{}
-    Type client.Object
+    Type object

     // Cache used to watch APIs
     Cache cache.Cache

+    Handler handler.TypedEventHandler[object, request]

+    Predicates []predicate.TypedPredicate[object]

-    // started may contain an error if one was encountered during startup. If its closed and does not
+    // startedErr may contain an error if one was encountered during startup. If it's closed and does not
     // contain an error, startup and syncing finished.
-    started     chan error
+    startedErr  chan error
     startCancel func()
 }

 // Start is internal and should be called only by the Controller to register an EventHandler with the Informer
 // to enqueue reconcile.Requests.
-func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface,
-    prct ...predicate.Predicate) error {
-    if ks.Type == nil {
+func (ks *Kind[object, request]) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[request]) error {
+    if isNil(ks.Type) {
         return fmt.Errorf("must create Kind with a non-nil object")
     }
-    if ks.Cache == nil {
+    if isNil(ks.Cache) {
         return fmt.Errorf("must create Kind with a non-nil cache")
     }
+    if isNil(ks.Handler) {
+        return errors.New("must create Kind with non-nil handler")
+    }

     // cache.GetInformer will block until its context is cancelled if the cache was already started and it cannot
     // sync that informer (most commonly due to RBAC issues).
     ctx, ks.startCancel = context.WithCancel(ctx)
-    ks.started = make(chan error)
+    ks.startedErr = make(chan error)
     go func() {
         var (
             i cache.Informer
@@ -72,30 +80,30 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
             return true, nil
         }); err != nil {
             if lastErr != nil {
-                ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr)
+                ks.startedErr <- fmt.Errorf("failed to get informer from cache: %w", lastErr)
                 return
             }
-            ks.started <- err
+            ks.startedErr <- err
             return
         }

-        _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs())
+        _, err := i.AddEventHandler(NewEventHandler(ctx, queue, ks.Handler, ks.Predicates).HandlerFuncs())
         if err != nil {
-            ks.started <- err
+            ks.startedErr <- err
             return
         }
         if !ks.Cache.WaitForCacheSync(ctx) {
             // Would be great to return something more informative here
-            ks.started <- errors.New("cache did not sync")
+            ks.startedErr <- errors.New("cache did not sync")
         }
-        close(ks.started)
+        close(ks.startedErr)
     }()

     return nil
 }

-func (ks *Kind) String() string {
-    if ks.Type != nil {
+func (ks *Kind[object, request]) String() string {
+    if !isNil(ks.Type) {
         return fmt.Sprintf("kind source: %T", ks.Type)
     }
     return "kind source: unknown type"
@@ -103,9 +111,9 @@ func (ks *Kind) String() string {
 // WaitForSync implements SyncingSource to allow controllers to wait with starting
 // workers until the cache is synced.
-func (ks *Kind) WaitForSync(ctx context.Context) error {
+func (ks *Kind[object, request]) WaitForSync(ctx context.Context) error {
     select {
-    case err := <-ks.started:
+    case err := <-ks.startedErr:
         return err
     case <-ctx.Done():
         ks.startCancel()

@@ -115,3 +123,15 @@ func (ks *Kind) WaitForSync(ctx context.Context) error {
         return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type)
     }
 }
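
WaitForSync is an instance of a common pattern: race a startup-error channel (closed on success) against the caller's context. A standalone sketch of the pattern, separate from the vendored code:

import (
    "context"
    "fmt"
)

// waitOrErr returns the first startup error, nil once startedErr is closed
// (successful startup), or a wrapped context error on cancellation.
func waitOrErr(ctx context.Context, startedErr <-chan error) error {
    select {
    case err := <-startedErr: // nil when the channel was closed on success
        return err
    case <-ctx.Done():
        return fmt.Errorf("startup canceled: %w", ctx.Err())
    }
}
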
+func isNil(arg any) bool {
+    if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr ||
+        v.Kind() == reflect.Interface ||
+        v.Kind() == reflect.Slice ||
+        v.Kind() == reflect.Map ||
+        v.Kind() == reflect.Chan ||
+        v.Kind() == reflect.Func) && v.IsNil()) {
+        return true
+    }
+    return false
+}
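
The reflection-based isNil is needed because Type and Handler are now generic or interface values: an interface holding a typed nil pointer is itself non-nil, so a plain == nil comparison would let a nil object through. A self-contained demonstration of the pitfall:

package main

import (
    "fmt"
    "reflect"
)

type object interface{ GetName() string }

type pod struct{}

func (*pod) GetName() string { return "" }

func main() {
    var p *pod            // typed nil pointer
    var o object = p      // interface holds (type *pod, value nil)
    fmt.Println(o == nil) // false: the interface itself is non-nil

    v := reflect.ValueOf(o)
    fmt.Println(v.Kind() == reflect.Ptr && v.IsNil()) // true: the held value is nil
}
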