Upgrade k8s package version (#5358)
* upgrade k8s package version
* Script upgrade and code formatting.

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS (generated, vendored; 8 lines changed)
@@ -1,8 +1,6 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 reviewers:
-- lavalamp
-- smarterclayton
-- wojtek-t
-- timothysc
-- hongchaodeng
+  - lavalamp
+  - smarterclayton
+  - wojtek-t
vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go (generated, vendored; 33 lines changed)
@@ -19,19 +19,25 @@ package storagebackend
 import (
 	"time"
 
+	oteltrace "go.opentelemetry.io/otel/trace"
+
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/server/egressselector"
 	"k8s.io/apiserver/pkg/storage/etcd3"
 	"k8s.io/apiserver/pkg/storage/value"
+	flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
 )
 
 const (
 	StorageTypeUnset = ""
 	StorageTypeETCD2 = "etcd2"
 	StorageTypeETCD3 = "etcd3"
 
 	DefaultCompactInterval      = 5 * time.Minute
 	DefaultDBMetricPollInterval = 30 * time.Second
 	DefaultHealthcheckTimeout   = 2 * time.Second
+	DefaultReadinessTimeout     = 2 * time.Second
 )
 
 // TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
@@ -44,6 +50,8 @@ type TransportConfig struct {
 	TrustedCAFile string
 	// function to determine the egress dialer. (i.e. konnectivity server dialer)
 	EgressLookup egressselector.Lookup
+	// The TracerProvider can add tracing the connection
+	TracerProvider oteltrace.TracerProvider
 }
 
 // Config is configuration for creating a storage backend.
@@ -78,8 +86,31 @@ type Config struct {
 	DBMetricPollInterval time.Duration
 	// HealthcheckTimeout specifies the timeout used when checking health
 	HealthcheckTimeout time.Duration
+	// ReadycheckTimeout specifies the timeout used when checking readiness
+	ReadycheckTimeout time.Duration
 
 	LeaseManagerConfig etcd3.LeaseManagerConfig
+
+	// StorageObjectCountTracker is used to keep track of the total
+	// number of objects in the storage per resource.
+	StorageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker
 }
 
+// ConfigForResource is a Config specialized to a particular `schema.GroupResource`
+type ConfigForResource struct {
+	// Config is the resource-independent configuration
+	Config
+
+	// GroupResource is the relevant one
+	GroupResource schema.GroupResource
+}
+
+// ForResource specializes to the given resource
+func (config *Config) ForResource(resource schema.GroupResource) *ConfigForResource {
+	return &ConfigForResource{
+		Config:        *config,
+		GroupResource: resource,
+	}
+}
+
 func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
@@ -90,6 +121,8 @@ func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
 		CompactionInterval:   DefaultCompactInterval,
 		DBMetricPollInterval: DefaultDBMetricPollInterval,
 		HealthcheckTimeout:   DefaultHealthcheckTimeout,
+		ReadycheckTimeout:    DefaultReadinessTimeout,
 		LeaseManagerConfig:   etcd3.NewDefaultLeaseManagerConfig(),
+		Transport:            TransportConfig{TracerProvider: oteltrace.NewNoopTracerProvider()},
 	}
 }
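Note: ConfigForResource and ForResource are new in this version of the vendored package; the factory functions below now take the specialized config so etcd3 can report per-resource metrics. A minimal usage sketch, assuming the vendored packages are on the import path (the /registry prefix and the nil codec are illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

func main() {
	// NewDefaultConfig fills in the compaction, health, and readiness
	// defaults added above; a real caller would pass a runtime.Codec.
	base := storagebackend.NewDefaultConfig("/registry", nil)

	// ForResource copies the shared config and pins it to one GroupResource.
	podsConfig := base.ForResource(schema.GroupResource{Resource: "pods"})
	fmt.Println(podsConfig.GroupResource.String(), podsConfig.Prefix)
}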
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go (generated, vendored; 171 lines changed)
@@ -19,28 +19,38 @@ package factory
 import (
 	"context"
 	"fmt"
+	"log"
 	"net"
 	"net/url"
+	"os"
 	"path"
 	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 
 	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
-	"go.etcd.io/etcd/clientv3"
-	"go.etcd.io/etcd/pkg/transport"
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+	"go.etcd.io/etcd/client/pkg/v3/transport"
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 	"google.golang.org/grpc"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	genericfeatures "k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/server/egressselector"
 	"k8s.io/apiserver/pkg/storage"
 	"k8s.io/apiserver/pkg/storage/etcd3"
 	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
 	"k8s.io/apiserver/pkg/storage/storagebackend"
 	"k8s.io/apiserver/pkg/storage/value"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/metrics/legacyregistry"
+	tracing "k8s.io/component-base/tracing"
 	"k8s.io/klog/v2"
 )
@@ -58,6 +68,14 @@ const (
 	dbMetricsMonitorJitter = 0.5
 )
 
+// TODO(negz): Stop using a package scoped logger. At the time of writing we're
+// creating an etcd client for each CRD. We need to pass each etcd client a
+// logger or each client will create its own, which comes with a significant
+// memory cost (around 20% of the API server's memory when hundreds of CRDs are
+// present). The correct fix here is to not create a client per CRD. See
+// https://github.com/kubernetes/kubernetes/issues/111476 for more.
+var etcd3ClientLogger *zap.Logger
+
 func init() {
 	// grpcprom auto-registers (via an init function) their client metrics, since we are opting out of
 	// using the global prometheus registry and using our own wrapped global registry,
@@ -65,49 +83,115 @@ func init() {
 	// For reference: https://github.com/kubernetes/kubernetes/pull/81387
 	legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)
 	dbMetricsMonitors = make(map[string]struct{})
+
+	l, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+	if err != nil {
+		l = zap.NewNop()
+	}
+	etcd3ClientLogger = l.Named("etcd-client")
 }
 
-func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
+// NOTE(negz): This is a copy of a private etcd client function:
+// https://github.com/etcd-io/etcd/blob/v3.5.4/client/v3/logger.go#L47
+func etcdClientDebugLevel() zapcore.Level {
+	envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+	if envLevel == "" || envLevel == "true" {
+		return zapcore.InfoLevel
+	}
+	var l zapcore.Level
+	if err := l.Set(envLevel); err == nil {
+		log.Printf("Deprecated env ETCD_CLIENT_DEBUG value. Using default level: 'info'")
+		return zapcore.InfoLevel
+	}
+	return l
+}
+
+func newETCD3HealthCheck(c storagebackend.Config, stopCh <-chan struct{}) (func() error, error) {
+	timeout := storagebackend.DefaultHealthcheckTimeout
+	if c.HealthcheckTimeout != time.Duration(0) {
+		timeout = c.HealthcheckTimeout
+	}
+	return newETCD3Check(c, timeout, stopCh)
+}
+
+func newETCD3ReadyCheck(c storagebackend.Config, stopCh <-chan struct{}) (func() error, error) {
+	timeout := storagebackend.DefaultReadinessTimeout
+	if c.ReadycheckTimeout != time.Duration(0) {
+		timeout = c.ReadycheckTimeout
+	}
+	return newETCD3Check(c, timeout, stopCh)
+}
+
+func newETCD3Check(c storagebackend.Config, timeout time.Duration, stopCh <-chan struct{}) (func() error, error) {
 	// constructing the etcd v3 client blocks and times out if etcd is not available.
 	// retry in a loop in the background until we successfully create the client, storing the client or error encountered
 
-	clientValue := &atomic.Value{}
-
-	clientErrMsg := &atomic.Value{}
-	clientErrMsg.Store("etcd client connection not yet established")
+	lock := sync.Mutex{}
+	var client *clientv3.Client
+	clientErr := fmt.Errorf("etcd client connection not yet established")
 
 	go wait.PollUntil(time.Second, func() (bool, error) {
-		client, err := newETCD3Client(c.Transport)
+		newClient, err := newETCD3Client(c.Transport)
+
+		lock.Lock()
+		defer lock.Unlock()
+
+		// Ensure that server is already not shutting down.
+		select {
+		case <-stopCh:
+			if err == nil {
+				newClient.Close()
+			}
+			return true, nil
+		default:
+		}
+
 		if err != nil {
-			clientErrMsg.Store(err.Error())
+			clientErr = err
 			return false, nil
 		}
-		clientValue.Store(client)
-		clientErrMsg.Store("")
+		client = newClient
+		clientErr = nil
 		return true, nil
-	}, wait.NeverStop)
+	}, stopCh)
+
+	// Close the client on shutdown.
+	go func() {
+		defer utilruntime.HandleCrash()
+		<-stopCh
+
+		lock.Lock()
+		defer lock.Unlock()
+		if client != nil {
+			client.Close()
+			clientErr = fmt.Errorf("server is shutting down")
+		}
+	}()
 
 	return func() error {
-		if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {
-			return fmt.Errorf(errMsg)
+		// Given that client is closed on shutdown we hold the lock for
+		// the entire period of healthcheck call to ensure that client will
+		// not be closed during healthcheck.
+		// Given that healthchecks has a 2s timeout, worst case of blocking
+		// shutdown for additional 2s seems acceptable.
+		lock.Lock()
+		defer lock.Unlock()
+		if clientErr != nil {
+			return clientErr
 		}
-		client := clientValue.Load().(*clientv3.Client)
-		healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout
-		if c.HealthcheckTimeout != time.Duration(0) {
-			healthcheckTimeout = c.HealthcheckTimeout
-		}
-		ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout)
+		ctx, cancel := context.WithTimeout(context.Background(), timeout)
 		defer cancel()
 		// See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118
 		_, err := client.Get(ctx, path.Join("/", c.Prefix, "health"))
 		if err == nil {
 			return nil
 		}
-		return fmt.Errorf("error getting data from etcd: %v", err)
+		return fmt.Errorf("error getting data from etcd: %w", err)
 	}, nil
 }
 
-func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
+var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, error) {
 	tlsInfo := transport.TLSInfo{
 		CertFile:      c.CertFile,
 		KeyFile:       c.KeyFile,
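Note: the rework above replaces the old atomic.Value pair with a mutex-guarded client so the new shutdown goroutine can close it safely, and the returned closure holds the same lock for its entire call. The toy program below is a sketch of just that locking pattern, not the vendored API; newCheck and the sleeps are illustrative stand-ins for newETCD3Client and wait.PollUntil:

package main

import (
	"fmt"
	"sync"
	"time"
)

// newCheck reproduces the locking pattern of newETCD3Check: a poll goroutine
// establishes the "client", a shutdown goroutine invalidates it, and the
// returned check holds the lock across its whole call so the client cannot
// be closed mid-check.
func newCheck(stopCh <-chan struct{}) func() error {
	var lock sync.Mutex
	clientErr := fmt.Errorf("client connection not yet established")

	go func() { // stands in for wait.PollUntil(time.Second, ..., stopCh)
		time.Sleep(10 * time.Millisecond) // pretend dialing takes a moment
		lock.Lock()
		defer lock.Unlock()
		select {
		case <-stopCh: // server already shutting down; keep the error
		default:
			clientErr = nil // "client" established
		}
	}()

	go func() { // stands in for the close-the-client-on-shutdown goroutine
		<-stopCh
		lock.Lock()
		defer lock.Unlock()
		clientErr = fmt.Errorf("server is shutting down")
	}()

	return func() error {
		lock.Lock() // held for the whole check, as in the vendored code
		defer lock.Unlock()
		return clientErr
	}
}

func main() {
	stopCh := make(chan struct{})
	check := newCheck(stopCh)
	fmt.Println("before dial:", check())
	time.Sleep(20 * time.Millisecond)
	fmt.Println("after dial:", check())
	close(stopCh)
	time.Sleep(time.Millisecond)
	fmt.Println("after shutdown:", check())
}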
@@ -132,19 +216,40 @@ func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error)
 	}
 	dialOptions := []grpc.DialOption{
 		grpc.WithBlock(), // block until the underlying connection is up
-		grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
-		grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
+		// use chained interceptors so that the default (retry and backoff) interceptors are added.
+		// otherwise they will be overwritten by the metric interceptor.
+		//
+		// these optional interceptors will be placed after the default ones.
+		// which seems to be what we want as the metrics will be collected on each attempt (retry)
+		grpc.WithChainUnaryInterceptor(grpcprom.UnaryClientInterceptor),
+		grpc.WithChainStreamInterceptor(grpcprom.StreamClientInterceptor),
 	}
+	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
+		tracingOpts := []otelgrpc.Option{
+			otelgrpc.WithPropagators(tracing.Propagators()),
+			otelgrpc.WithTracerProvider(c.TracerProvider),
+		}
+		// Even with Noop TracerProvider, the otelgrpc still handles context propagation.
+		// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
+		dialOptions = append(dialOptions,
+			grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)),
+			grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...)))
+	}
 	if egressDialer != nil {
 		dialer := func(ctx context.Context, addr string) (net.Conn, error) {
-			u, err := url.Parse(addr)
-			if err != nil {
-				return nil, err
+			if strings.Contains(addr, "//") {
+				// etcd client prior to 3.5 passed URLs to dialer, normalize to address
+				u, err := url.Parse(addr)
+				if err != nil {
+					return nil, err
+				}
+				addr = u.Host
 			}
-			return egressDialer(ctx, "tcp", u.Host)
+			return egressDialer(ctx, "tcp", addr)
 		}
 		dialOptions = append(dialOptions, grpc.WithContextDialer(dialer))
 	}
 
 	cfg := clientv3.Config{
 		DialTimeout:          dialTimeout,
 		DialKeepAliveTime:    keepaliveTime,
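Note: the dialer change above accounts for etcd client v3.5 handing custom dialers a bare host:port, where pre-3.5 clients handed them a full URL. A small self-contained sketch of the same normalization; normalizeAddr is a hypothetical helper extracted for clarity, not part of the vendored code:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// normalizeAddr mirrors the dialer logic: addresses containing "//" are
// treated as URLs (pre-3.5 behavior) and reduced to their host:port.
func normalizeAddr(addr string) (string, error) {
	if strings.Contains(addr, "//") {
		u, err := url.Parse(addr)
		if err != nil {
			return "", err
		}
		return u.Host, nil
	}
	return addr, nil
}

func main() {
	for _, addr := range []string{"https://10.0.0.1:2379", "10.0.0.1:2379"} {
		host, err := normalizeAddr(addr)
		fmt.Println(addr, "->", host, err)
	}
}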
@@ -152,6 +257,7 @@ func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error)
 		DialOptions:          dialOptions,
 		Endpoints:            c.ServerList,
 		TLS:                  tlsConfig,
+		Logger:               etcd3ClientLogger,
 	}
 
 	return clientv3.New(cfg)
@@ -222,7 +328,7 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration
 	}, nil
 }
 
-func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
+func newETCD3Storage(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
 	stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
 	if err != nil {
 		return nil, nil, err
@@ -234,6 +340,9 @@ func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (st
 		return nil, nil, err
 	}
 
+	// decorate the KV instance so we can track etcd latency per request.
+	client.KV = etcd3.NewETCDLatencyTracker(client.KV)
+
 	stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval)
 	if err != nil {
 		return nil, nil, err
@@ -254,7 +363,7 @@ func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (st
 	if transformer == nil {
 		transformer = value.IdentityTransformer
 	}
-	return etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil
+	return etcd3.New(client, c.Codec, newFunc, c.Prefix, c.GroupResource, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil
 }
 
 // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the
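Note: the init() change in this file builds one shared, named zap logger for every etcd client rather than letting each client allocate its own, which the TODO above reports costing around 20% of apiserver memory with hundreds of CRDs. A minimal sketch of the same construction using the vendored logutil package (the chosen level here is illustrative):

package main

import (
	"go.etcd.io/etcd/client/pkg/v3/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Build one shared logger for all etcd clients, as init() does above.
	l, err := logutil.CreateDefaultZapLogger(zapcore.InfoLevel)
	if err != nil {
		l = zap.NewNop() // fall back to a no-op logger rather than failing
	}
	logger := l.Named("etcd-client")
	logger.Info("etcd client logger ready")
}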
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go (generated, vendored; 25 lines changed)
@@ -28,10 +28,10 @@ import (
 type DestroyFunc func()
 
 // Create creates a storage backend based on given config.
-func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
+func Create(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
 	switch c.Type {
-	case "etcd2":
-		return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
+	case storagebackend.StorageTypeETCD2:
+		return nil, nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type)
 	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3:
 		return newETCD3Storage(c, newFunc)
 	default:
@@ -40,12 +40,23 @@ func Create(c storagebackend.Config, newFunc func() runtime.Object) (storage.Int
 }
 
 // CreateHealthCheck creates a healthcheck function based on given config.
-func CreateHealthCheck(c storagebackend.Config) (func() error, error) {
+func CreateHealthCheck(c storagebackend.Config, stopCh <-chan struct{}) (func() error, error) {
 	switch c.Type {
-	case "etcd2":
-		return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
+	case storagebackend.StorageTypeETCD2:
+		return nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type)
 	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3:
-		return newETCD3HealthCheck(c)
+		return newETCD3HealthCheck(c, stopCh)
 	default:
 		return nil, fmt.Errorf("unknown storage type: %s", c.Type)
 	}
 }
+
+func CreateReadyCheck(c storagebackend.Config, stopCh <-chan struct{}) (func() error, error) {
+	switch c.Type {
+	case storagebackend.StorageTypeETCD2:
+		return nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type)
+	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3:
+		return newETCD3ReadyCheck(c, stopCh)
+	default:
+		return nil, fmt.Errorf("unknown storage type: %s", c.Type)
+	}
+}
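Note: callers now thread a stop channel through CreateHealthCheck and the new CreateReadyCheck so the factory can close its etcd client on shutdown. A hedged usage sketch; the endpoint and prefix are illustrative, and a real caller would also set a codec and TLS material:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/storage/storagebackend"
	"k8s.io/apiserver/pkg/storage/storagebackend/factory"
)

func main() {
	cfg := storagebackend.NewDefaultConfig("/registry", nil)
	cfg.Transport.ServerList = []string{"https://127.0.0.1:2379"}

	stopCh := make(chan struct{})
	defer close(stopCh) // lets the factory close its etcd client on shutdown

	healthCheck, err := factory.CreateHealthCheck(*cfg, stopCh)
	if err != nil {
		fmt.Println("health:", err)
		return
	}
	readyCheck, err := factory.CreateReadyCheck(*cfg, stopCh)
	if err != nil {
		fmt.Println("ready:", err)
		return
	}
	// Until the background poll dials etcd, both checks report that the
	// client connection is not yet established.
	fmt.Println(healthCheck(), readyCheck())
}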