use istio client-go library instead of knative (#1661)

use istio client-go library instead of knative
bump kubernetes dependency version
change code coverage to codecov
Author: zryfish (committed by GitHub)
Date: 2019-12-13 11:26:18 +08:00
Parent: f249a6e081
Commit: ea88c8803d
2071 changed files with 354531 additions and 108336 deletions

vendor/k8s.io/client-go/tools/leaderelection/OWNERS

@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- mikedanese
- timothysc

vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go

@@ -16,12 +16,16 @@ limitations under the License.
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
-// election state.
+// election state. This implementation does not guarantee that only one
+// client is acting as a leader (a.k.a. fencing).
//
-// This implementation does not guarantee that only one client is acting as a
-// leader (a.k.a. fencing). A client observes timestamps captured locally to
-// infer the state of the leader election. Thus the implementation is tolerant
-// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
+// A client only acts on timestamps captured locally to infer the state of the
+// leader election. The client does not consider timestamps in the leader
+// election record to be accurate because these timestamps may not have been
+// produced by a local clock. The implementation does not depend on their
+// accuracy and only uses their change to indicate that another client has
+// renewed the leader lease. Thus the implementation is tolerant to arbitrary
+// clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
@@ -85,14 +89,23 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.RetryPeriod < 1 {
return nil, fmt.Errorf("retryPeriod must be greater than zero")
}
+if lec.Callbacks.OnStartedLeading == nil {
+return nil, fmt.Errorf("OnStartedLeading callback must not be nil")
+}
+if lec.Callbacks.OnStoppedLeading == nil {
+return nil, fmt.Errorf("OnStoppedLeading callback must not be nil")
+}
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
-return &LeaderElector{
-config: lec,
-clock: clock.RealClock{},
-}, nil
+le := LeaderElector{
+config: lec,
+clock: clock.RealClock{},
+metrics: globalMetricsFactory.newLeaderMetrics(),
+}
+le.metrics.leaderOff(le.config.Name)
+return &le, nil
}
type LeaderElectionConfig struct {
@@ -102,12 +115,26 @@ type LeaderElectionConfig struct {
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack.
+//
+// A client needs to wait a full LeaseDuration without observing a change to
+// the record before it can attempt to take over. When all clients are
+// shut down and a new set of clients are started with different names against
+// the same leader record, they must wait the full LeaseDuration before
+// attempting to acquire the lease. Thus LeaseDuration should be as short as
+// possible (within your tolerance for clock skew rate) to avoid possible
+// long waits in this scenario.
+//
+// Core clients default this value to 15 seconds.
LeaseDuration time.Duration
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up.
+//
+// Core clients default this value to 10 seconds.
RenewDeadline time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
+//
+// Core clients default this value to 2 seconds.
RetryPeriod time.Duration
// Callbacks are callbacks that are triggered during certain lifecycle
@@ -118,6 +145,13 @@ type LeaderElectionConfig struct {
// WatchDog may be null if its not needed/configured.
WatchDog *HealthzAdaptor
+// ReleaseOnCancel should be set true if the lock should be released
+// when the run context is cancelled. If you set this to true, you must
+// ensure all code guarded by this lease has successfully completed
+// prior to cancelling the context, or you may have two processes
+// simultaneously acting on the critical path.
+ReleaseOnCancel bool
// Name is the name of the resource lock for debugging
Name string
}
@@ -152,6 +186,8 @@ type LeaderElector struct {
// clock is wrapper around time to allow for less flaky testing
clock clock.Clock
+metrics leaderMetricsAdapter
// name is the name of the resource lock for debugging
name string
}
@@ -211,6 +247,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
return
}
le.config.Lock.RecordEvent("became leader")
+le.metrics.leaderOn(le.config.Name)
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
@@ -246,9 +283,32 @@ func (le *LeaderElector) renew(ctx context.Context) {
return
}
le.config.Lock.RecordEvent("stopped leading")
+le.metrics.leaderOff(le.config.Name)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
+// if we hold the lease, give it up
+if le.config.ReleaseOnCancel {
+le.release()
+}
}
+// release attempts to release the leader lease if we have acquired it.
+func (le *LeaderElector) release() bool {
+if !le.IsLeader() {
+return true
+}
+leaderElectionRecord := rl.LeaderElectionRecord{
+LeaderTransitions: le.observedRecord.LeaderTransitions,
+}
+if err := le.config.Lock.Update(leaderElectionRecord); err != nil {
+klog.Errorf("Failed to release lock: %v", err)
+return false
+}
+le.observedRecord = leaderElectionRecord
+le.observedTime = le.clock.Now()
+return true
+}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
@@ -284,7 +344,8 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = le.clock.Now()
}
-if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
+if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
+le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
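
Taken together, the leaderelection.go changes add per-lock metrics, a voluntary step-down path (ReleaseOnCancel plus the new release helper, which writes a record with an empty HolderIdentity), and immediate takeover of leases whose holder identity is empty. A minimal sketch of wiring this vendored API together, assuming a constructed kubernetes.Interface clientset and a caller-chosen identity; runWithLease, the lease name, and the namespace are illustrative, not part of this diff:

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

func runWithLease(ctx context.Context, clientset kubernetes.Interface, id string) {
	// Lease-based lock backed by the coordination.k8s.io/v1 API (new in this diff).
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Namespace: "default", Name: "example-controller"},
		Client:     clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second, // core clients' default
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		// New field: step down (write an empty HolderIdentity) when ctx is
		// cancelled, so other candidates can take over without waiting out
		// the full LeaseDuration.
		ReleaseOnCancel: true,
		// Both callbacks are now required by NewLeaderElector's validation.
		Callbacks: leaderelection.LeaderElectionCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Leader-only work goes here; return when ctx is done.
			},
			OnStoppedLeading: func() {
				klog.Infof("%s: leader lease lost", id)
			},
		},
	})
}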

vendor/k8s.io/client-go/tools/leaderelection/metrics.go (new vendored file, 109 lines)

@@ -0,0 +1,109 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"sync"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type leaderMetricsAdapter interface {
leaderOn(name string)
leaderOff(name string)
}
// SwitchMetric represents a per-name state that can be switched on and off.
type SwitchMetric interface {
On(name string)
Off(name string)
}
type noopMetric struct{}
func (noopMetric) On(name string) {}
func (noopMetric) Off(name string) {}
// defaultLeaderMetrics expects the caller to lock before setting any metrics.
type defaultLeaderMetrics struct {
// leader's value indicates if the current process is the owner of name lease
leader SwitchMetric
}
func (m *defaultLeaderMetrics) leaderOn(name string) {
if m == nil {
return
}
m.leader.On(name)
}
func (m *defaultLeaderMetrics) leaderOff(name string) {
if m == nil {
return
}
m.leader.Off(name)
}
type noMetrics struct{}
func (noMetrics) leaderOn(name string) {}
func (noMetrics) leaderOff(name string) {}
// MetricsProvider generates various metrics used by the leader election.
type MetricsProvider interface {
NewLeaderMetric() SwitchMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric {
return noopMetric{}
}
var globalMetricsFactory = leaderMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type leaderMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter {
mp := f.metricsProvider
if mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultLeaderMetrics{
leader: mp.NewLeaderMetric(),
}
}
// SetProvider sets the metrics provider for all subsequently created leader
// electors. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
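
This mirrors client-go's workqueue metrics hook: nothing is recorded unless a provider is registered before the first LeaderElector is constructed, and only the first SetProvider call wins. A minimal sketch of a provider built against the interfaces above; the logging implementation and the logSwitch/logProvider names are illustrative:

package main

import (
	"log"

	"k8s.io/client-go/tools/leaderelection"
)

// logSwitch satisfies SwitchMetric by logging leadership transitions.
type logSwitch struct{}

func (logSwitch) On(name string)  { log.Printf("leader on: %q", name) }
func (logSwitch) Off(name string) { log.Printf("leader off: %q", name) }

// logProvider satisfies MetricsProvider.
type logProvider struct{}

func (logProvider) NewLeaderMetric() leaderelection.SwitchMetric { return logSwitch{} }

func init() {
	// Must happen before the first NewLeaderElector call; later calls are
	// ignored because of the sync.Once in leaderMetricsFactory.
	leaderelection.SetProvider(logProvider{})
}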

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go

@@ -93,6 +93,9 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
// RecordEvent in leader election while adding meta-data
func (cml *ConfigMapLock) RecordEvent(s string) {
+if cml.LockConfig.EventRecorder == nil {
+return
+}
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go

@@ -88,6 +88,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
// RecordEvent in leader election while adding meta-data
func (el *EndpointsLock) RecordEvent(s string) {
+if el.LockConfig.EventRecorder == nil {
+return
+}
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}
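
Both guards make the EventRecorder optional, matching the new comment on ResourceLockConfig in interface.go below. For callers that do want LeaderElection events, a sketch of building a recorder with client-go's record package; the component name is illustrative:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)

func lockConfig(identity string) resourcelock.ResourceLockConfig {
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
	return resourcelock.ResourceLockConfig{
		Identity: identity,
		// record.EventRecorder satisfies the narrower resourcelock.EventRecorder
		// interface; leaving this nil is now safe as well.
		EventRecorder: recorder,
	}
}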

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go

@@ -20,14 +20,16 @@ import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
const (
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
EndpointsResourceLock = "endpoints"
ConfigMapsResourceLock = "configmaps"
+LeasesResourceLock = "leases"
)
// LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -35,6 +37,11 @@ const (
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
+// HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and
+// all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not
+// attempt to acquire leases with empty identities and will wait for the full lease
+// interval to expire before attempting to reacquire. This value is set to empty when
+// a client voluntarily steps down.
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
@@ -42,11 +49,19 @@ type LeaderElectionRecord struct {
LeaderTransitions int `json:"leaderTransitions"`
}
+// EventRecorder records a change in the ResourceLock.
+type EventRecorder interface {
+Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{})
+}
// ResourceLockConfig common data that exists across different
// resource locks
type ResourceLockConfig struct {
-Identity string
-EventRecorder record.EventRecorder
+// Identity is the unique string identifying a lease holder across
+// all participants in an election.
+Identity string
+// EventRecorder is optional.
+EventRecorder EventRecorder
}
// Interface offers a common interface for locking on arbitrary
@@ -76,7 +91,7 @@ type Interface interface {
}
// New will create a lock of a given type according to the input parameters
-func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) {
+func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
switch lockType {
case EndpointsResourceLock:
return &EndpointsLock{
@@ -84,7 +99,7 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface,
Namespace: ns,
Name: name,
},
-Client: client,
+Client: coreClient,
LockConfig: rlc,
}, nil
case ConfigMapsResourceLock:
@@ -93,7 +108,16 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface,
Namespace: ns,
Name: name,
},
-Client: client,
+Client: coreClient,
LockConfig: rlc,
}, nil
+case LeasesResourceLock:
+return &LeaseLock{
+LeaseMeta: metav1.ObjectMeta{
+Namespace: ns,
+Name: name,
+},
+Client: coordinationClient,
+LockConfig: rlc,
+}, nil
default:
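
Note the signature break in New: every caller now supplies a coordination client in addition to the core client, even for the endpoints and configmaps lock types. A sketch of requesting the new lease-based lock through this factory; newLeaseLock, the namespace, and the lock name are illustrative:

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLeaseLock(clientset kubernetes.Interface, id string) (resourcelock.Interface, error) {
	return resourcelock.New(
		resourcelock.LeasesResourceLock, // "leases"
		"kube-system",                   // namespace of the Lease object
		"example-controller",            // name of the Lease object
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
}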

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (new vendored file, 124 lines)

@@ -0,0 +1,124 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"errors"
"fmt"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
)
type LeaseLock struct {
// LeaseMeta should contain a Name and a Namespace of a
// LeaseMeta object that the LeaderElector will attempt to lead.
LeaseMeta metav1.ObjectMeta
Client coordinationv1client.LeasesGetter
LockConfig ResourceLockConfig
lease *coordinationv1.Lease
}
// Get returns the election record from a Lease spec
func (ll *LeaseLock) Get() (*LeaderElectionRecord, error) {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return LeaseSpecToLeaderElectionRecord(&ll.lease.Spec), nil
}
// Create attempts to create a Lease
func (ll *LeaseLock) Create(ler LeaderElectionRecord) error {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: ll.LeaseMeta.Name,
Namespace: ll.LeaseMeta.Namespace,
},
Spec: LeaderElectionRecordToLeaseSpec(&ler),
})
return err
}
// Update will update an existing Lease spec.
func (ll *LeaseLock) Update(ler LeaderElectionRecord) error {
if ll.lease == nil {
return errors.New("lease not initialized, call get or create first")
}
ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease)
return err
}
// RecordEvent in leader election while adding meta-data
func (ll *LeaseLock) RecordEvent(s string) {
if ll.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s)
ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (ll *LeaseLock) Describe() string {
return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name)
}
// Identity returns the Identity of the lock
func (ll *LeaseLock) Identity() string {
return ll.LockConfig.Identity
}
func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord {
holderIdentity := ""
if spec.HolderIdentity != nil {
holderIdentity = *spec.HolderIdentity
}
leaseDurationSeconds := 0
if spec.LeaseDurationSeconds != nil {
leaseDurationSeconds = int(*spec.LeaseDurationSeconds)
}
leaseTransitions := 0
if spec.LeaseTransitions != nil {
leaseTransitions = int(*spec.LeaseTransitions)
}
return &LeaderElectionRecord{
HolderIdentity: holderIdentity,
LeaseDurationSeconds: leaseDurationSeconds,
AcquireTime: metav1.Time{spec.AcquireTime.Time},
RenewTime: metav1.Time{spec.RenewTime.Time},
LeaderTransitions: leaseTransitions,
}
}
func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
leaseDurationSeconds := int32(ler.LeaseDurationSeconds)
leaseTransitions := int32(ler.LeaderTransitions)
return coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
}
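
The two helpers above translate between the Lease wire format and the library's internal LeaderElectionRecord. As vendored here, LeaseSpecToLeaderElectionRecord dereferences spec.AcquireTime and spec.RenewTime without nil checks, so it assumes a Lease that was written by this library rather than an arbitrary hand-edited object. A round-trip sketch; the identity value is illustrative:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	now := metav1.Time{Time: time.Now()}
	rec := resourcelock.LeaderElectionRecord{
		HolderIdentity:       "candidate-a",
		LeaseDurationSeconds: 15,
		AcquireTime:          now,
		RenewTime:            now,
		LeaderTransitions:    1,
	}
	// Record -> LeaseSpec -> Record; timestamps travel as *metav1.MicroTime
	// on the Lease, so on the wire they carry microsecond precision.
	spec := resourcelock.LeaderElectionRecordToLeaseSpec(&rec)
	back := resourcelock.LeaseSpecToLeaderElectionRecord(&spec)
	fmt.Printf("%+v\n", *back)
}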