Upgrade k8s package version (#5358)
* upgrade k8s package version

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

* Script upgrade and code formatting.

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
vendor/k8s.io/client-go/tools/leaderelection/OWNERS (generated, vendored) | 15
@@ -1,12 +1,11 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- mikedanese
-- timothysc
+- mikedanese
 reviewers:
-- wojtek-t
-- deads2k
-- mikedanese
-- timothysc
-- ingvagabund
-- resouer
+- wojtek-t
+- deads2k
+- mikedanese
+- ingvagabund
+emeritus_approvers:
+- timothysc
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored) | 50
@@ -56,14 +56,15 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
+	"k8s.io/utils/clock"
 
 	"k8s.io/klog/v2"
 )
@@ -142,7 +143,7 @@ type LeaderElectionConfig struct {
 	Callbacks LeaderCallbacks
 
 	// WatchDog is the associated health checker
-	// WatchDog may be null if its not needed/configured.
+	// WatchDog may be null if it's not needed/configured.
 	WatchDog *HealthzAdaptor
 
 	// ReleaseOnCancel should be set true if the lock should be released
@@ -160,7 +161,7 @@ type LeaderElectionConfig struct {
 // lifecycle events of the LeaderElector. These are invoked asynchronously.
 //
 // possible future callbacks:
-//  * OnChallenge()
+//   - OnChallenge()
 type LeaderCallbacks struct {
 	// OnStartedLeading is called when a LeaderElector client starts leading
 	OnStartedLeading func(context.Context)
@@ -187,6 +188,9 @@ type LeaderElector struct {
 	// clock is wrapper around time to allow for less flaky testing
 	clock clock.Clock
 
+	// used to lock the observedRecord
+	observedRecordLock sync.Mutex
+
 	metrics leaderMetricsAdapter
 }
 
@@ -224,13 +228,14 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
 
 // GetLeader returns the identity of the last observed leader or returns the empty string if
 // no leader has yet been observed.
+// This function is for informational purposes. (e.g. monitoring, logs, etc.)
 func (le *LeaderElector) GetLeader() string {
-	return le.observedRecord.HolderIdentity
+	return le.getObservedRecord().HolderIdentity
 }
 
 // IsLeader returns true if the last observed leader was this client else returns false.
 func (le *LeaderElector) IsLeader() bool {
-	return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
+	return le.getObservedRecord().HolderIdentity == le.config.Lock.Identity()
 }
 
 // acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
@@ -301,8 +306,8 @@ func (le *LeaderElector) release() bool {
 		klog.Errorf("Failed to release lock: %v", err)
 		return false
 	}
-	le.observedRecord = leaderElectionRecord
-	le.observedTime = le.clock.Now()
+
+	le.setObservedRecord(&leaderElectionRecord)
 	return true
 }
 
@@ -329,16 +334,17 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
 			klog.Errorf("error initially creating leader election record: %v", err)
 			return false
 		}
-		le.observedRecord = leaderElectionRecord
-		le.observedTime = le.clock.Now()
+
+		le.setObservedRecord(&leaderElectionRecord)
+
 		return true
 	}
 
 	// 2. Record obtained, check the Identity & Time
 	if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
-		le.observedRecord = *oldLeaderElectionRecord
+		le.setObservedRecord(oldLeaderElectionRecord)
+
 		le.observedRawRecord = oldLeaderElectionRawRecord
-		le.observedTime = le.clock.Now()
 	}
 	if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
 		le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
@@ -362,8 +368,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
 		return false
 	}
 
-	le.observedRecord = leaderElectionRecord
-	le.observedTime = le.clock.Now()
+	le.setObservedRecord(&leaderElectionRecord)
 	return true
 }
 
@@ -392,3 +397,22 @@ func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
 
 	return nil
 }
+
+// setObservedRecord will set a new observedRecord and update observedTime to the current time.
+// Protect critical sections with lock.
+func (le *LeaderElector) setObservedRecord(observedRecord *rl.LeaderElectionRecord) {
+	le.observedRecordLock.Lock()
+	defer le.observedRecordLock.Unlock()
+
+	le.observedRecord = *observedRecord
+	le.observedTime = le.clock.Now()
+}
+
+// getObservedRecord returns observersRecord.
+// Protect critical sections with lock.
+func (le *LeaderElector) getObservedRecord() rl.LeaderElectionRecord {
+	le.observedRecordLock.Lock()
+	defer le.observedRecordLock.Unlock()
+
+	return le.observedRecord
+}
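The leaderelection.go changes above replace direct reads and writes of le.observedRecord/le.observedTime with the new mutex-guarded setObservedRecord/getObservedRecord accessors, so the renew loop and callers of GetLeader()/IsLeader() no longer race on those fields. The following standalone sketch restates that guarded-accessor pattern outside the vendored code; all types and names here are invented for illustration and are not part of this change:

package main

import (
	"fmt"
	"sync"
	"time"
)

// record stands in for the leader-election record that multiple goroutines observe.
type record struct{ HolderIdentity string }

type elector struct {
	mu           sync.Mutex // guards observed and observedTime
	observed     record
	observedTime time.Time
}

// setObserved mirrors setObservedRecord: writes go through the mutex and refresh the timestamp.
func (e *elector) setObserved(r *record) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.observed = *r
	e.observedTime = time.Now()
}

// getObserved mirrors getObservedRecord: reads take the same mutex, so they never see a torn update.
func (e *elector) getObserved() record {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.observed
}

func main() {
	e := &elector{}
	e.setObserved(&record{HolderIdentity: "node-a"})
	fmt.Println(e.getObserved().HolderIdentity) // prints: node-a
}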
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored) | 20
@@ -32,7 +32,7 @@ import (
 // and use ConfigMaps as the means to pass that configuration
 // data we will likely move to deprecate the Endpoints lock.
 
-type ConfigMapLock struct {
+type configMapLock struct {
 	// ConfigMapMeta should contain a Name and a Namespace of a
 	// ConfigMapMeta object that the LeaderElector will attempt to lead.
 	ConfigMapMeta metav1.ObjectMeta
@@ -42,7 +42,7 @@ type ConfigMapLock struct {
 }
 
 // Get returns the election record from a ConfigMap Annotation
-func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
+func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 	var record LeaderElectionRecord
 	var err error
 	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
@@ -63,7 +63,7 @@ func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
-func (cml *ConfigMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
+func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 	recordBytes, err := json.Marshal(ler)
 	if err != nil {
 		return err
@@ -81,7 +81,7 @@ func (cml *ConfigMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // Update will update an existing annotation on a given resource.
-func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
+func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 	if cml.cm == nil {
 		return errors.New("configmap not initialized, call get or create first")
 	}
@@ -102,21 +102,25 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // RecordEvent in leader election while adding meta-data
-func (cml *ConfigMapLock) RecordEvent(s string) {
+func (cml *configMapLock) RecordEvent(s string) {
 	if cml.LockConfig.EventRecorder == nil {
 		return
 	}
 	events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
-	cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
+	subject := &v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}
+	// Populate the type meta, so we don't have to get it from the schema
+	subject.Kind = "ConfigMap"
+	subject.APIVersion = v1.SchemeGroupVersion.String()
+	cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
 }
 
 // Describe is used to convert details on current resource lock
 // into a string
-func (cml *ConfigMapLock) Describe() string {
+func (cml *configMapLock) Describe() string {
 	return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
 }
 
 // Identity returns the Identity of the lock
-func (cml *ConfigMapLock) Identity() string {
+func (cml *configMapLock) Identity() string {
 	return cml.LockConfig.Identity
 }
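In the RecordEvent change above, the event subject is built from the lock object's ObjectMeta and then Kind and APIVersion are filled in by hand, because ObjectMeta carries only name/namespace-style metadata while Kind/APIVersion live in the separate, embedded TypeMeta. A minimal sketch of that idea (the helper name is invented for illustration and not part of the vendored code):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// eventSubject copies the ConfigMap's ObjectMeta into a fresh object and then
// populates the embedded TypeMeta explicitly; without this the recorded event's
// involvedObject would have empty Kind and APIVersion fields.
func eventSubject(cm *v1.ConfigMap) *v1.ConfigMap {
	subject := &v1.ConfigMap{ObjectMeta: cm.ObjectMeta}
	subject.Kind = "ConfigMap"
	subject.APIVersion = v1.SchemeGroupVersion.String()
	return subject
}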
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored) | 20
@@ -27,7 +27,7 @@ import (
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
-type EndpointsLock struct {
+type endpointsLock struct {
 	// EndpointsMeta should contain a Name and a Namespace of an
 	// Endpoints object that the LeaderElector will attempt to lead.
 	EndpointsMeta metav1.ObjectMeta
@@ -37,7 +37,7 @@ type EndpointsLock struct {
 }
 
 // Get returns the election record from a Endpoints Annotation
-func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
+func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 	var record LeaderElectionRecord
 	var err error
 	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
@@ -58,7 +58,7 @@ func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
-func (el *EndpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
+func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 	recordBytes, err := json.Marshal(ler)
 	if err != nil {
 		return err
@@ -76,7 +76,7 @@ func (el *EndpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // Update will update and existing annotation on a given resource.
-func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
+func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 	if el.e == nil {
 		return errors.New("endpoint not initialized, call get or create first")
 	}
@@ -97,21 +97,25 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // RecordEvent in leader election while adding meta-data
-func (el *EndpointsLock) RecordEvent(s string) {
+func (el *endpointsLock) RecordEvent(s string) {
 	if el.LockConfig.EventRecorder == nil {
 		return
 	}
 	events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
-	el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
+	subject := &v1.Endpoints{ObjectMeta: el.e.ObjectMeta}
+	// Populate the type meta, so we don't have to get it from the schema
+	subject.Kind = "Endpoints"
+	subject.APIVersion = v1.SchemeGroupVersion.String()
+	el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
 }
 
 // Describe is used to convert details on current resource lock
 // into a string
-func (el *EndpointsLock) Describe() string {
+func (el *endpointsLock) Describe() string {
 	return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
 }
 
 // Identity returns the Identity of the lock
-func (el *EndpointsLock) Identity() string {
+func (el *endpointsLock) Identity() string {
 	return el.LockConfig.Identity
 }
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored) | 86
@@ -31,11 +31,77 @@ import (
 
 const (
 	LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
-	EndpointsResourceLock             = "endpoints"
-	ConfigMapsResourceLock            = "configmaps"
+	endpointsResourceLock             = "endpoints"
+	configMapsResourceLock            = "configmaps"
 	LeasesResourceLock                = "leases"
-	EndpointsLeasesResourceLock       = "endpointsleases"
-	ConfigMapsLeasesResourceLock      = "configmapsleases"
+	// When using EndpointsLeasesResourceLock, you need to ensure that
+	// API Priority & Fairness is configured with non-default flow-schema
+	// that will catch the necessary operations on leader-election related
+	// endpoint objects.
+	//
+	// The example of such flow scheme could look like this:
+	//   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
+	//   kind: FlowSchema
+	//   metadata:
+	//     name: my-leader-election
+	//   spec:
+	//     distinguisherMethod:
+	//       type: ByUser
+	//     matchingPrecedence: 200
+	//     priorityLevelConfiguration:
+	//       name: leader-election # reference the <leader-election> PL
+	//     rules:
+	//     - resourceRules:
+	//       - apiGroups:
+	//         - ""
+	//         namespaces:
+	//         - '*'
+	//         resources:
+	//         - endpoints
+	//         verbs:
+	//         - get
+	//         - create
+	//         - update
+	//       subjects:
+	//       - kind: ServiceAccount
+	//         serviceAccount:
+	//           name: '*'
+	//           namespace: kube-system
+	EndpointsLeasesResourceLock = "endpointsleases"
+	// When using EndpointsLeasesResourceLock, you need to ensure that
+	// API Priority & Fairness is configured with non-default flow-schema
+	// that will catch the necessary operations on leader-election related
+	// configmap objects.
+	//
+	// The example of such flow scheme could look like this:
+	//   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
+	//   kind: FlowSchema
+	//   metadata:
+	//     name: my-leader-election
+	//   spec:
+	//     distinguisherMethod:
+	//       type: ByUser
+	//     matchingPrecedence: 200
+	//     priorityLevelConfiguration:
+	//       name: leader-election # reference the <leader-election> PL
+	//     rules:
+	//     - resourceRules:
+	//       - apiGroups:
+	//         - ""
+	//         namespaces:
+	//         - '*'
+	//         resources:
+	//         - configmaps
+	//         verbs:
+	//         - get
+	//         - create
+	//         - update
+	//       subjects:
+	//       - kind: ServiceAccount
+	//         serviceAccount:
+	//           name: '*'
+	//           namespace: kube-system
+	ConfigMapsLeasesResourceLock = "configmapsleases"
 )
 
 // LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -98,7 +164,7 @@ type Interface interface {
 
 // Manufacture will create a lock of a given type according to the input parameters
 func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
-	endpointsLock := &EndpointsLock{
+	endpointsLock := &endpointsLock{
 		EndpointsMeta: metav1.ObjectMeta{
 			Namespace: ns,
 			Name:      name,
@@ -106,7 +172,7 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
 		Client:     coreClient,
 		LockConfig: rlc,
 	}
-	configmapLock := &ConfigMapLock{
+	configmapLock := &configMapLock{
 		ConfigMapMeta: metav1.ObjectMeta{
 			Namespace: ns,
 			Name:      name,
@@ -123,10 +189,10 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
 		LockConfig: rlc,
 	}
 	switch lockType {
-	case EndpointsResourceLock:
-		return endpointsLock, nil
-	case ConfigMapsResourceLock:
-		return configmapLock, nil
+	case endpointsResourceLock:
+		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock)
+	case configMapsResourceLock:
+		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock)
 	case LeasesResourceLock:
 		return leaseLock, nil
 	case EndpointsLeasesResourceLock:
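With the interface.go changes above, resourcelock.New rejects the removed "endpoints" and "configmaps" lock types, and callers are expected to request a Lease-based lock instead. A rough sketch of what calling code might look like after this upgrade (the helper name and parameters are invented for illustration; only resourcelock.New, its constants, and ResourceLockConfig come from the vendored package):

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// newLeaseLock asks for a Lease-based lock; passing "endpoints" or "configmaps"
// as the lock type would now make resourcelock.New return a "migrate to ..." error.
func newLeaseLock(clientset kubernetes.Interface, namespace, name, identity string) (resourcelock.Interface, error) {
	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		namespace,
		name,
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: identity},
	)
}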
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (generated, vendored) | 6
@@ -87,7 +87,11 @@ func (ll *LeaseLock) RecordEvent(s string) {
 		return
 	}
 	events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s)
-	ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events)
+	subject := &coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}
+	// Populate the type meta, so we don't have to get it from the schema
+	subject.Kind = "Lease"
+	subject.APIVersion = coordinationv1.SchemeGroupVersion.String()
+	ll.LockConfig.EventRecorder.Eventf(subject, corev1.EventTypeNormal, "LeaderElection", events)
 }
 
 // Describe is used to convert details on current resource lock