use istio client-go library instead of knative (#1661)

use istio client-go library instead of knative
bump kubernetes dependency version
change code coverage to codecov
zryfish
2019-12-13 11:26:18 +08:00
committed by GitHub
parent f249a6e081
commit ea88c8803d
2071 changed files with 354531 additions and 108336 deletions


@@ -31,14 +31,14 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.KBLog.WithName("controller")
var log = logf.RuntimeLog.WithName("controller")
var _ inject.Injector = &Controller{}
@@ -68,6 +68,11 @@ type Controller struct {
// specified, or the ~/.kube/Config.
Config *rest.Config
// MakeQueue constructs the queue for this controller once the controller is ready to start.
// This exists because the standard Kubernetes workqueues start themselves immediately, which
// leads to goroutine leaks if something calls controller.New repeatedly.
MakeQueue func() workqueue.RateLimitingInterface
// Queue is a listeningQueue that listens for events from Informers and adds object keys to
// the Queue for processing
Queue workqueue.RateLimitingInterface
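The MakeQueue hook means the workqueue is only materialized inside Start instead of at construction time. The actual wiring lives in the public controller package and is not part of this hunk, so the following is only a sketch of the kind of constructor one would plug in, using the standard k8s.io/client-go/util/workqueue helpers:

// Hypothetical wiring (not shown in this diff): defer queue creation until
// Start so repeated controller.New calls do not leak the goroutines a live
// workqueue spawns.
c := &Controller{
	Name: "example",
	MakeQueue: func() workqueue.RateLimitingInterface {
		return workqueue.NewNamedRateLimitingQueue(
			workqueue.DefaultControllerRateLimiter(), "example")
	},
}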
@@ -93,6 +98,16 @@ type Controller struct {
Recorder record.EventRecorder
// TODO(community): Consider initializing a logger with the Controller Name as the tag
// watches maintains a list of sources, handlers, and predicates to start when the controller is started.
watches []watchDescription
}
// watchDescription contains all the information necessary to start a watch.
type watchDescription struct {
src source.Source
handler handler.EventHandler
predicates []predicate.Predicate
}
// Reconcile implements reconcile.Reconciler
@@ -118,73 +133,89 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
}
}
log.Info("Starting EventSource", "controller", c.Name, "source", src)
return src.Start(evthdler, c.Queue, prct...)
c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct})
if c.Started {
log.Info("Starting EventSource", "controller", c.Name, "source", src)
return src.Start(evthdler, c.Queue, prct...)
}
return nil
}
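With this change, Watch called before Start only records the source, handler, and predicates as a watchDescription; the source is started later by Start (or immediately if the controller is already running). A usage sketch, with an illustrative watched type and handler:

// Illustrative only: corev1 is k8s.io/api/core/v1, and the Pod type and
// handler are placeholders. Before Start this just appends a
// watchDescription; once Started is true the source is started right away.
if err := c.Watch(
	&source.Kind{Type: &corev1.Pod{}},
	&handler.EnqueueRequestForObject{},
); err != nil {
	log.Error(err, "unable to set up watch", "controller", c.Name)
}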
// Start implements controller.Controller
func (c *Controller) Start(stop <-chan struct{}) error {
// use an IIFE to get proper lock handling
// but lock outside to get proper handling of the queue shutdown
c.mu.Lock()
// TODO(pwittrock): Reconsider HandleCrash
defer utilruntime.HandleCrash()
defer c.Queue.ShutDown()
c.Queue = c.MakeQueue()
defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed
// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
log.Info("Starting Controller", "controller", c.Name)
err := func() error {
defer c.mu.Unlock()
// Wait for the caches to be synced before starting workers
if c.WaitForCacheSync == nil {
c.WaitForCacheSync = c.Cache.WaitForCacheSync
}
if ok := c.WaitForCacheSync(stop); !ok {
// This code is unreachable right now since WaitForCacheSync will never return an error
// Leaving it here because that could happen in the future
err := fmt.Errorf("failed to wait for %s caches to sync", c.Name)
log.Error(err, "Could not wait for Cache to sync", "controller", c.Name)
c.mu.Unlock()
// TODO(pwittrock): Reconsider HandleCrash
defer utilruntime.HandleCrash()
// NB(directxman12): launch the sources *before* trying to wait for the
// caches to sync so that they have a chance to register their intended
// caches.
for _, watch := range c.watches {
log.Info("Starting EventSource", "controller", c.Name, "source", watch.src)
if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil {
return err
}
}
// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
log.Info("Starting Controller", "controller", c.Name)
// Wait for the caches to be synced before starting workers
if c.WaitForCacheSync == nil {
c.WaitForCacheSync = c.Cache.WaitForCacheSync
}
if ok := c.WaitForCacheSync(stop); !ok {
// This code is unreachable right now since WaitForCacheSync will never return an error
// Leaving it here because that could happen in the future
err := fmt.Errorf("failed to wait for %s caches to sync", c.Name)
log.Error(err, "Could not wait for Cache to sync", "controller", c.Name)
return err
}
if c.JitterPeriod == 0 {
c.JitterPeriod = 1 * time.Second
}
// Launch workers to process resources
log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
// Process work items
go wait.Until(c.worker, c.JitterPeriod, stop)
}
c.Started = true
return nil
}()
if err != nil {
return err
}
if c.JitterPeriod == 0 {
c.JitterPeriod = 1 * time.Second
}
// Launch workers to process resources
log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
// Process work items
go wait.Until(func() {
for c.processNextWorkItem() {
}
}, c.JitterPeriod, stop)
}
c.Started = true
c.mu.Unlock()
<-stop
log.Info("Stopping workers", "controller", c.Name)
return nil
}
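Because the old and new bodies of Start are interleaved in the hunk above, the resulting structure is easier to see condensed. The sketch below (startSketch is a hypothetical name; the numbered comments stand in for the code shown in the hunk) captures the locking pattern: the mutex is held only for setup inside an anonymous function so its defer fires at the end of setup, the queue shutdown is deferred outside that function so it runs after the stop channel closes, and the blocking wait happens without the lock.

func (c *Controller) startSketch(stop <-chan struct{}) error {
	c.mu.Lock()
	// The queue is created under the lock, but must be shut down only after
	// <-stop, so the deferred ShutDown sits outside the anonymous function.
	c.Queue = c.MakeQueue()
	defer c.Queue.ShutDown()

	err := func() error {
		defer c.mu.Unlock()
		// 1. start every recorded watch source (before the cache sync)
		// 2. wait for the informer caches to sync
		// 3. spawn MaxConcurrentReconciles workers via wait.Until
		c.Started = true
		return nil
	}()
	if err != nil {
		return err
	}

	<-stop // block until shutdown without holding the lock
	return nil
}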
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
// This code copy-pasted from the sample-Controller.
// Update metrics after processing each item
reconcileStartTS := time.Now()
defer func() {
c.updateMetrics(time.Now().Sub(reconcileStartTS))
}()
obj, shutdown := c.Queue.Get()
if obj == nil {
// Sometimes the Queue gives us nil items when it starts up
c.Queue.Forget(obj)
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the reconcileHandler.
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.Queue.Get()
if shutdown {
// Stop working
return false
@@ -197,6 +228,17 @@ func (c *Controller) processNextWorkItem() bool {
// put back on the workqueue and attempted again after a back-off
// period.
defer c.Queue.Done(obj)
return c.reconcileHandler(obj)
}
func (c *Controller) reconcileHandler(obj interface{}) bool {
// Update metrics after processing each item
reconcileStartTS := time.Now()
defer func() {
c.updateMetrics(time.Now().Sub(reconcileStartTS))
}()
var req reconcile.Request
var ok bool
if req, ok = obj.(reconcile.Request); !ok {
@@ -209,7 +251,6 @@ func (c *Controller) processNextWorkItem() bool {
// Return true, don't take a break
return true
}
// RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the
// resource to be synced.
if result, err := c.Do.Reconcile(req); err != nil {
@@ -219,6 +260,11 @@ func (c *Controller) processNextWorkItem() bool {
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc()
return false
} else if result.RequeueAfter > 0 {
// The result.RequeueAfter request will be lost if it is returned
// along with a non-nil error, but this is intended: we need to drive
// to stable reconcile loops before queuing due to result.RequeueAfter.
c.Queue.Forget(obj)
c.Queue.AddAfter(req, result.RequeueAfter)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc()
return true
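The requeue_after branch corresponds to a Reconciler asking to be invoked again later. A minimal, hypothetical reconciler that takes this path (the type name and duration are illustrative; the Reconcile signature matches the c.Do.Reconcile(req) call above):

// Returning RequeueAfter with a nil error lands in the branch above: the item
// is Forgotten, AddAfter re-queues it, and ctrlmetrics.ReconcileTotal is
// incremented with the "requeue_after" result.
type exampleReconciler struct{}

func (r *exampleReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
	// ... fetch the object identified by req.NamespacedName and act on it ...
	return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}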