update dependencies (#6267)
Signed-off-by: hongming <coder.scala@gmail.com>
110 lines changed: vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go (generated) (vendored)
@@ -179,6 +179,24 @@ func (cm *controllerManager) add(r Runnable) error {
 	return cm.runnables.Add(r)
 }
 
+// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics.
+func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error {
+	cm.Lock()
+	defer cm.Unlock()
+	if cm.started {
+		return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created")
+	}
+	if cm.metricsServer == nil {
+		cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler will be ignored", "path", path)
+		return nil
+	}
+	if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil {
+		return err
+	}
+	cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path)
+	return nil
+}
+
 // AddHealthzCheck allows you to add Healthz checker.
 func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
 	cm.Lock()
@@ -284,9 +302,8 @@ func (cm *controllerManager) addHealthProbeServer() error {
 		mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
 	}
 
-	return cm.add(&server{
-		Kind:     "health probe",
-		Log:      cm.logger,
+	return cm.add(&Server{
+		Name:     "health probe",
 		Server:   srv,
 		Listener: cm.healthProbeListener,
 	})
@@ -302,9 +319,8 @@ func (cm *controllerManager) addPprofServer() error {
 	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
 	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
 
-	return cm.add(&server{
-		Kind:     "pprof",
-		Log:      cm.logger,
+	return cm.add(&Server{
+		Name:     "pprof",
 		Server:   srv,
 		Listener: cm.pprofListener,
 	})
@@ -335,6 +351,16 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
 	// Initialize the internal context.
 	cm.internalCtx, cm.internalCancel = context.WithCancel(ctx)
 
+	// Leader elector must be created before defer that contains engageStopProcedure function
+	// https://github.com/kubernetes-sigs/controller-runtime/issues/2873
+	var leaderElector *leaderelection.LeaderElector
+	if cm.resourceLock != nil {
+		leaderElector, err = cm.initLeaderElector()
+		if err != nil {
+			return fmt.Errorf("failed during initialization leader election process: %w", err)
+		}
+	}
+
 	// This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request
 	stopComplete := make(chan struct{})
 	defer close(stopComplete)
@@ -384,14 +410,13 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
 		}
 	}
 
-	// First start any internal HTTP servers, which includes health probes, metrics and profiling if enabled.
+	// First start any HTTP servers, which includes health probes, metrics and profiling if enabled.
 	//
-	// WARNING: Internal HTTP servers MUST start before any cache is populated, otherwise it would block
-	// conversion webhooks to be ready for serving which make the cache never get ready.
-	if err := cm.runnables.HTTPServers.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start HTTP servers: %w", err)
-		}
-	}
+	// WARNING: HTTPServers includes the health probes, which MUST start before any cache is populated, otherwise
+	// it would block conversion webhooks to be ready for serving which make the cache never get ready.
+	logCtx := logr.NewContext(cm.internalCtx, cm.logger)
+	if err := cm.runnables.HTTPServers.Start(logCtx); err != nil {
+		return fmt.Errorf("failed to start HTTP servers: %w", err)
+	}
 
 	// Start any webhook servers, which includes conversion, validation, and defaulting
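The logCtx change above threads the manager's logger through the context so runnables in the HTTPServers group can recover it. A minimal sketch of that plumbing using only the go-logr API; the names below are illustrative and not part of this diff:

package main

import (
	"context"

	"github.com/go-logr/logr"
)

func main() {
	// Stand-in for cm.logger; a real manager carries a configured logr.Logger.
	logger := logr.Discard()

	// What Start now does before launching the HTTPServers group.
	logCtx := logr.NewContext(context.Background(), logger)

	// What a runnable does on the other side; crlog.FromContext in server.go
	// performs the same lookup with a fallback logger.
	if recovered, err := logr.FromContext(logCtx); err == nil {
		recovered.Info("logger recovered from context")
	}
}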
@@ -401,42 +426,39 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
 	// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
 	// to never start because no cache can be populated.
 	if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start webhooks: %w", err)
-		}
+		return fmt.Errorf("failed to start webhooks: %w", err)
 	}
 
 	// Start and wait for caches.
 	if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start caches: %w", err)
-		}
+		return fmt.Errorf("failed to start caches: %w", err)
 	}
 
 	// Start the non-leaderelection Runnables after the cache has synced.
 	if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
-		if err != nil {
-			return fmt.Errorf("failed to start other runnables: %w", err)
-		}
+		return fmt.Errorf("failed to start other runnables: %w", err)
 	}
 
 	// Start the leader election and all required runnables.
 	{
 		ctx, cancel := context.WithCancel(context.Background())
 		cm.leaderElectionCancel = cancel
-		go func() {
-			if cm.resourceLock != nil {
-				if err := cm.startLeaderElection(ctx); err != nil {
-					cm.errChan <- err
-				}
-			} else {
+		if leaderElector != nil {
+			// Start the leader elector process
+			go func() {
+				leaderElector.Run(ctx)
+				<-ctx.Done()
+				close(cm.leaderElectionStopped)
+			}()
+		} else {
+			go func() {
 				// Treat not having leader election enabled the same as being elected.
 				if err := cm.startLeaderElectionRunnables(); err != nil {
 					cm.errChan <- err
 				}
 				close(cm.elected)
-			}
-		}()
+			}()
+		}
 	}
 
 	ready = true
@@ -485,8 +507,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
 		cm.internalCancel()
 	})
 	select {
-	case err, ok := <-cm.errChan:
-		if ok {
+	case err := <-cm.errChan:
+		if !errors.Is(err, context.Canceled) {
 			cm.logger.Error(err, "error received after stop sequence was engaged")
 		}
 	case <-stopComplete:
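The switch from the `ok` check to errors.Is means shutdown-induced failures stay quiet even when a runnable wraps context.Canceled before sending it to errChan. A small stdlib-only illustration, not taken from the diff:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// A runnable typically wraps the cancellation error before reporting it.
	err := fmt.Errorf("runnable failed: %w", context.Canceled)

	// errors.Is unwraps, so the stop procedure does not log plain cancellations.
	fmt.Println(errors.Is(err, context.Canceled)) // true
}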
@@ -518,6 +540,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
 
 	// Stop all the leader election runnables, which includes reconcilers.
 	cm.logger.Info("Stopping and waiting for leader election runnables")
+	// Prevent leader election when shutting down a non-elected manager
+	cm.runnables.LeaderElection.startOnce.Do(func() {})
 	cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx)
 
 	// Stop the caches before the leader election runnables, this is an important
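The added startOnce.Do(func() {}) line consumes the group's sync.Once with a no-op, so a racing Start on a never-elected manager becomes inert during shutdown. The underlying stdlib behavior, sketched with illustrative names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var startOnce sync.Once

	// "Burn" the Once with a no-op, as engageStopProcedure now does.
	startOnce.Do(func() {})

	// Any later attempt to start is silently skipped.
	startOnce.Do(func() { fmt.Println("never printed") })
	fmt.Println("start suppressed")
}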
@@ -553,12 +577,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
 	return nil
 }
 
-func (cm *controllerManager) startLeaderElectionRunnables() error {
-	return cm.runnables.LeaderElection.Start(cm.internalCtx)
-}
-
-func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) {
-	l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+func (cm *controllerManager) initLeaderElector() (*leaderelection.LeaderElector, error) {
+	leaderElector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
 		Lock:          cm.resourceLock,
 		LeaseDuration: cm.leaseDuration,
 		RenewDeadline: cm.renewDeadline,
@@ -588,16 +608,14 @@ func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error
 		Name: cm.leaderElectionID,
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	// Start the leader elector process
-	go func() {
-		l.Run(ctx)
-		<-ctx.Done()
-		close(cm.leaderElectionStopped)
-	}()
-	return nil
+	return leaderElector, nil
+}
+
+func (cm *controllerManager) startLeaderElectionRunnables() error {
+	return cm.runnables.LeaderElection.Start(cm.internalCtx)
 }
 
 func (cm *controllerManager) Elected() <-chan struct{} {
132 lines changed: vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go (generated) (vendored)
@@ -22,14 +22,12 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	"reflect"
 	"time"
 
 	"github.com/go-logr/logr"
 	coordinationv1 "k8s.io/api/coordination/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -41,7 +39,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/cluster"
 	"sigs.k8s.io/controller-runtime/pkg/config"
-	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
 	"sigs.k8s.io/controller-runtime/pkg/leaderelection"
@@ -67,6 +64,15 @@ type Manager interface {
 	// election was configured.
 	Elected() <-chan struct{}
 
+	// AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics.
+	// Might be useful to register some diagnostic endpoints e.g. pprof.
+	//
+	// Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly.
+	//
+	// If the simple path -> handler mapping offered here is not enough,
+	// a new http server/listener should be added as Runnable to the manager via Add method.
+	AddMetricsServerExtraHandler(path string, handler http.Handler) error
+
 	// AddHealthzCheck allows you to add Healthz checker
 	AddHealthzCheck(name string, check healthz.Checker) error
 
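A hedged usage sketch for the new interface method; the path, bind address, and handler below are illustrative assumptions, not part of this commit. It must run before mgr.Start, because the implementation in internal.go rejects new handlers once the manager has started:

package main

import (
	"net/http"
	"net/http/pprof"

	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Metrics: metricsserver.Options{BindAddress: ":8080"},
	})
	if err != nil {
		panic(err)
	}

	// Serve pprof's index alongside /metrics; register before Start.
	if err := mgr.AddMetricsServerExtraHandler("/debug/pprof/", http.HandlerFunc(pprof.Index)); err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}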
@@ -438,126 +444,6 @@ func New(config *rest.Config, options Options) (Manager, error) {
 	}, nil
 }
 
-// AndFrom will use a supplied type and convert to Options
-// any options already set on Options will be ignored, this is used to allow
-// cli flags to override anything specified in the config file.
-//
-// Deprecated: This function has been deprecated and will be removed in a future release,
-// The Component Configuration package has been unmaintained for over a year and is no longer
-// actively developed. Users should migrate to their own configuration format
-// and configure Manager.Options directly.
-// See https://github.com/kubernetes-sigs/controller-runtime/issues/895
-// for more information, feedback, and comments.
-func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
-	newObj, err := loader.Complete()
-	if err != nil {
-		return o, err
-	}
-
-	o = o.setLeaderElectionConfig(newObj)
-
-	if o.Cache.SyncPeriod == nil && newObj.SyncPeriod != nil {
-		o.Cache.SyncPeriod = &newObj.SyncPeriod.Duration
-	}
-
-	if len(o.Cache.DefaultNamespaces) == 0 && newObj.CacheNamespace != "" {
-		o.Cache.DefaultNamespaces = map[string]cache.Config{newObj.CacheNamespace: {}}
-	}
-
-	if o.Metrics.BindAddress == "" && newObj.Metrics.BindAddress != "" {
-		o.Metrics.BindAddress = newObj.Metrics.BindAddress
-	}
-
-	if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
-		o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
-	}
-
-	if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
-		o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
-	}
-
-	if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
-		o.LivenessEndpointName = newObj.Health.LivenessEndpointName
-	}
-
-	if o.WebhookServer == nil {
-		port := 0
-		if newObj.Webhook.Port != nil {
-			port = *newObj.Webhook.Port
-		}
-		o.WebhookServer = webhook.NewServer(webhook.Options{
-			Port:    port,
-			Host:    newObj.Webhook.Host,
-			CertDir: newObj.Webhook.CertDir,
-		})
-	}
-
-	if newObj.Controller != nil {
-		if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil {
-			o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout
-		}
-
-		if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
-			o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
-		}
-	}
-
-	return o, nil
-}
-
-// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
-//
-// Deprecated: This function has been deprecated and will be removed in a future release,
-// The Component Configuration package has been unmaintained for over a year and is no longer
-// actively developed. Users should migrate to their own configuration format
-// and configure Manager.Options directly.
-// See https://github.com/kubernetes-sigs/controller-runtime/issues/895
-// for more information, feedback, and comments.
-func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options {
-	o, err := o.AndFrom(loader)
-	if err != nil {
-		panic(fmt.Sprintf("could not parse config file: %v", err))
-	}
-	return o
-}
-
-func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
-	if obj.LeaderElection == nil {
-		// The source does not have any configuration; noop
-		return o
-	}
-
-	if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
-		o.LeaderElection = *obj.LeaderElection.LeaderElect
-	}
-
-	if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" {
-		o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock
-	}
-
-	if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" {
-		o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace
-	}
-
-	if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" {
-		o.LeaderElectionID = obj.LeaderElection.ResourceName
-	}
-
-	if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) {
-		o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration
-	}
-
-	if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) {
-		o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration
-	}
-
-	if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) {
-		o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration
-	}
-
-	return o
-}
-
 // defaultHealthProbeListener creates the default health probes listener bound to the given address.
 func defaultHealthProbeListener(addr string) (net.Listener, error) {
 	if addr == "" || addr == "0" {
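With AndFrom and setLeaderElectionConfig removed, values that used to come from a component config file move onto Manager.Options directly, per the deprecation note above. A minimal sketch with illustrative values:

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	// Each field replaces the component-config entry that AndFrom and
	// setLeaderElectionConfig used to copy over.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:          true,           // LeaderElection.LeaderElect
		LeaderElectionID:        "example-lock", // LeaderElection.ResourceName (illustrative)
		LeaderElectionNamespace: "kube-system",  // LeaderElection.ResourceNamespace (illustrative)
		HealthProbeBindAddress:  ":8081",        // Health.HealthProbeBindAddress
		Metrics:                 metricsserver.Options{BindAddress: ":8080"}, // Metrics.BindAddress
	})
	if err != nil {
		panic(err)
	}
	_ = mgr
}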
20 lines changed: vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go (generated) (vendored)
@@ -54,7 +54,10 @@ func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
 // The runnables added after Start are started directly.
 func (r *runnables) Add(fn Runnable) error {
 	switch runnable := fn.(type) {
-	case *server:
+	case *Server:
+		if runnable.NeedLeaderElection() {
+			return r.LeaderElection.Add(fn, nil)
+		}
 		return r.HTTPServers.Add(fn, nil)
 	case hasCache:
 		return r.Caches.Add(fn, func(ctx context.Context) bool {
@@ -263,6 +266,15 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error {
 		r.start.Unlock()
 	}
 
+	// Recheck if we're stopped and hold the readlock, given that the stop and start can be called
+	// at the same time, we can end up in a situation where the runnable is added
+	// after the group is stopped and the channel is closed.
+	r.stop.RLock()
+	defer r.stop.RUnlock()
+	if r.stopped {
+		return errRunnableGroupStopped
+	}
+
 	// Enqueue the runnable.
 	r.ch <- readyRunnable
 	return nil
@@ -272,7 +284,11 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error {
 func (r *runnableGroup) StopAndWait(ctx context.Context) {
 	r.stopOnce.Do(func() {
 		// Close the reconciler channel once we're done.
-		defer close(r.ch)
+		defer func() {
+			r.stop.Lock()
+			close(r.ch)
+			r.stop.Unlock()
+		}()
 
 		_ = r.Start(ctx)
 		r.stop.Lock()
74 lines changed: vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go (generated) (vendored)
@@ -21,34 +21,67 @@ import (
 	"errors"
 	"net"
 	"net/http"
+	"time"
 
-	"github.com/go-logr/logr"
+	crlog "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
-// server is a general purpose HTTP server Runnable for a manager
-// to serve some internal handlers such as health probes, metrics and profiling.
-type server struct {
-	Kind     string
-	Log      logr.Logger
-	Server   *http.Server
+var (
+	_ Runnable               = (*Server)(nil)
+	_ LeaderElectionRunnable = (*Server)(nil)
+)
+
+// Server is a general purpose HTTP server Runnable for a manager.
+// It is used to serve some internal handlers for health probes and profiling,
+// but it can also be used to run custom servers.
+type Server struct {
+	// Name is an optional string that describes the purpose of the server. It is used in logs to distinguish
+	// among multiple servers.
+	Name string
+
+	// Server is the HTTP server to run. It is required.
+	Server *http.Server
+
+	// Listener is an optional listener to use. If not set, the server start a listener using the server.Addr.
+	// Using a listener is useful when the port reservation needs to happen in advance of this runnable starting.
 	Listener net.Listener
+
+	// OnlyServeWhenLeader is an optional bool that indicates that the server should only be started when the manager is the leader.
+	OnlyServeWhenLeader bool
+
+	// ShutdownTimeout is an optional duration that indicates how long to wait for the server to shutdown gracefully. If not set,
+	// the server will wait indefinitely for all connections to close.
+	ShutdownTimeout *time.Duration
 }
 
-func (s *server) Start(ctx context.Context) error {
-	log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr())
+// Start starts the server. It will block until the server is stopped or an error occurs.
+func (s *Server) Start(ctx context.Context) error {
+	log := crlog.FromContext(ctx)
+	if s.Name != "" {
+		log = log.WithValues("name", s.Name)
+	}
+	log = log.WithValues("addr", s.addr())
 
 	serverShutdown := make(chan struct{})
 	go func() {
 		<-ctx.Done()
 		log.Info("shutting down server")
-		if err := s.Server.Shutdown(context.Background()); err != nil {
+
+		shutdownCtx := context.Background()
+		if s.ShutdownTimeout != nil {
+			var shutdownCancel context.CancelFunc
+			shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), *s.ShutdownTimeout)
+			defer shutdownCancel()
+		}
+
+		if err := s.Server.Shutdown(shutdownCtx); err != nil {
 			log.Error(err, "error shutting down server")
 		}
 		close(serverShutdown)
 	}()
 
 	log.Info("starting server")
-	if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
+	if err := s.serve(); err != nil && !errors.Is(err, http.ErrServerClosed) {
 		return err
 	}
 
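Since the type is now exported, a custom HTTP server can be handed to the manager as a runnable. A hedged sketch where the mux, address, and timeout are illustrative: OnlyServeWhenLeader routes the server to the leader-election group via runnables.Add, and ShutdownTimeout bounds the graceful shutdown shown above.

package main

import (
	"net/http"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	shutdownTimeout := 5 * time.Second
	if err := mgr.Add(&manager.Server{
		Name:                "hello",
		Server:              &http.Server{Addr: ":9090", Handler: mux},
		OnlyServeWhenLeader: true,             // only runs while this manager holds the lease
		ShutdownTimeout:     &shutdownTimeout, // bounds graceful shutdown
	}); err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}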
@@ -56,6 +89,21 @@ func (s *server) Start(ctx context.Context) error {
 	return nil
 }
 
-func (s *server) NeedLeaderElection() bool {
-	return false
+// NeedLeaderElection returns true if the server should only be started when the manager is the leader.
+func (s *Server) NeedLeaderElection() bool {
+	return s.OnlyServeWhenLeader
+}
+
+func (s *Server) addr() string {
+	if s.Listener != nil {
+		return s.Listener.Addr().String()
+	}
+	return s.Server.Addr
+}
+
+func (s *Server) serve() error {
+	if s.Listener != nil {
+		return s.Server.Serve(s.Listener)
+	}
+	return s.Server.ListenAndServe()
 }