use istio client-go library instead of knative (#1661)

use istio client-go library instead of knative
bump kubernetes dependency version
change code coverage to codecov
zryfish
2019-12-13 11:26:18 +08:00
committed by GitHub
parent f249a6e081
commit ea88c8803d
2071 changed files with 354531 additions and 108336 deletions
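
The switch this commit describes is from the Istio API types previously pulled in via knative to Istio's own generated clientset. A minimal sketch of what consuming that clientset looks like, assuming istio.io/client-go's versioned clientset package and the pre-1.18 client-go call style (no context argument); the kubeconfig path and namespace below are placeholders, not values from this commit:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	istioclient "istio.io/client-go/pkg/clientset/versioned"
)

func main() {
	// Build a rest.Config from a local kubeconfig (placeholder path).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// Istio's generated clientset replaces the knative-vendored Istio API types.
	ic, err := istioclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List VirtualServices through the networking.istio.io/v1alpha3 client.
	vsList, err := ic.NetworkingV1alpha3().VirtualServices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, vs := range vsList.Items {
		fmt.Println(vs.Name)
	}
}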

@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- lavalamp
- smarterclayton

@@ -20,6 +20,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/storage/value"
)
@@ -30,18 +31,26 @@ const (
DefaultCompactInterval = 5 * time.Minute
)
// Config is configuration for creating a storage backend.
type Config struct {
// Type defines the type of storage backend. Default ("") is "etcd3".
Type string
// Prefix is the prefix to all keys passed to storage.Interface methods.
Prefix string
// TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
type TransportConfig struct {
// ServerList is the list of storage servers to connect with.
ServerList []string
// TLS credentials
KeyFile string
CertFile string
CAFile string
// function to determine the egress dialer. (i.e. konnectivity server dialer)
EgressLookup egressselector.Lookup
}
// Config is configuration for creating a storage backend.
type Config struct {
// Type defines the type of storage backend. Default ("") is "etcd3".
Type string
// Prefix is the prefix to all keys passed to storage.Interface methods.
Prefix string
// Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
Transport TransportConfig
// Paging indicates whether the server implementation should allow paging (if it is
// supported). This is generally configured by feature gating, or by a specific
// resource type not wishing to allow paging, and is not intended for end users to
@@ -49,19 +58,24 @@ type Config struct {
Paging bool
Codec runtime.Codec
// EncodeVersioner is the same groupVersioner used to build the
// storage encoder. Given a list of kinds the input object might belong
// to, the EncodeVersioner outputs the gvk the object will be
// converted to before persisted in etcd.
EncodeVersioner runtime.GroupVersioner
// Transformer allows the value to be transformed prior to persisting into etcd.
Transformer value.Transformer
// CompactionInterval is an interval of requesting compaction from apiserver.
// If the value is 0, no compaction will be issued.
CompactionInterval time.Duration
// CountMetricPollPeriod specifies how often the count metric should be updated
CountMetricPollPeriod time.Duration
}
func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
return &Config{
Paging: true,
Prefix: prefix,
Codec: codec,
CompactionInterval: DefaultCompactInterval,

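The effect of the storagebackend change above is that the etcd connection details (ServerList, the TLS files, and the egress lookup) move off Config and into the nested TransportConfig. A minimal sketch of how a caller would populate the new shape; the endpoint and credential paths are placeholders:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

// exampleConfig builds a storagebackend.Config in the new shape: connection
// details live on the nested Transport field instead of directly on Config.
func exampleConfig(codec runtime.Codec) storagebackend.Config {
	return storagebackend.Config{
		Type:   storagebackend.StorageTypeETCD3,
		Prefix: "/registry",
		Transport: storagebackend.TransportConfig{
			ServerList: []string{"https://127.0.0.1:2379"}, // placeholder endpoint
			KeyFile:    "/etc/etcd/client.key",             // placeholder credential paths
			CertFile:   "/etc/etcd/client.crt",
			CAFile:     "/etc/etcd/ca.crt",
		},
		Paging:             true,
		Codec:              codec,
		CompactionInterval: storagebackend.DefaultCompactInterval,
	}
}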
@@ -19,6 +19,10 @@ package factory
import (
"context"
"fmt"
"net"
"net/url"
"path"
"sync"
"sync/atomic"
"time"
@@ -27,11 +31,14 @@ import (
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/value"
"k8s.io/component-base/metrics/legacyregistry"
)
// The short keepalive timeout and interval have been chosen to aggressively
@@ -44,6 +51,14 @@ const keepaliveTimeout = 10 * time.Second
// on heavily loaded arm64 CPUs (issue #64649)
const dialTimeout = 20 * time.Second
func init() {
// grpcprom auto-registers (via an init function) its client metrics. Since we are opting out of
// using the global prometheus registry and are using our own wrapped global registry,
// we need to explicitly register these metrics with our global registry here.
// For reference: https://github.com/kubernetes/kubernetes/pull/81387
legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)
}
func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
// constructing the etcd v3 client blocks and times out if etcd is not available.
// retry in a loop in the background until we successfully create the client, storing the client or error encountered
@@ -54,7 +69,7 @@ func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
clientErrMsg.Store("etcd client connection not yet established")
go wait.PollUntil(time.Second, func() (bool, error) {
client, err := newETCD3Client(c)
client, err := newETCD3Client(c.Transport)
if err != nil {
clientErrMsg.Store(err.Error())
return false, nil
@@ -71,14 +86,16 @@ func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
client := clientValue.Load().(*clientv3.Client)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
if _, err := client.Cluster.MemberList(ctx); err != nil {
return fmt.Errorf("error listing etcd members: %v", err)
// See https://github.com/etcd-io/etcd/blob/master/etcdctl/ctlv3/command/ep_command.go#L118
_, err := client.Get(ctx, path.Join(c.Prefix, "health"))
if err == nil {
return nil
}
return nil
return fmt.Errorf("error getting data from etcd: %v", err)
}, nil
}
func newETCD3Client(c storagebackend.Config) (*clientv3.Client, error) {
func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
tlsInfo := transport.TLSInfo{
CertFile: c.CertFile,
KeyFile: c.KeyFile,
@@ -93,31 +110,123 @@ func newETCD3Client(c storagebackend.Config) (*clientv3.Client, error) {
if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.CAFile) == 0 {
tlsConfig = nil
}
networkContext := egressselector.Etcd.AsNetworkContext()
var egressDialer utilnet.DialFunc
if c.EgressLookup != nil {
egressDialer, err = c.EgressLookup(networkContext)
if err != nil {
return nil, err
}
}
dialOptions := []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
}
if egressDialer != nil {
dialer := func(ctx context.Context, addr string) (net.Conn, error) {
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
return egressDialer(ctx, "tcp", u.Host)
}
dialOptions = append(dialOptions, grpc.WithContextDialer(dialer))
}
cfg := clientv3.Config{
DialTimeout: dialTimeout,
DialKeepAliveTime: keepaliveTime,
DialKeepAliveTimeout: keepaliveTimeout,
DialOptions: []grpc.DialOption{
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
},
Endpoints: c.ServerList,
TLS: tlsConfig,
DialOptions: dialOptions,
Endpoints: c.ServerList,
TLS: tlsConfig,
}
return clientv3.New(cfg)
}
type runningCompactor struct {
interval time.Duration
cancel context.CancelFunc
client *clientv3.Client
refs int
}
var (
lock sync.Mutex
compactors = map[string]*runningCompactor{}
)
// startCompactorOnce starts one compactor per transport. If the interval gets smaller on repeated calls, the
// compactor is replaced. A destroy func is returned. Once all destroy funcs for the same transport have been
// called, the compactor is stopped.
func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {
lock.Lock()
defer lock.Unlock()
key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile}
if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {
compactorClient, err := newETCD3Client(c)
if err != nil {
return nil, err
}
if foundBefore {
// replace compactor
compactor.cancel()
compactor.client.Close()
} else {
// start new compactor
compactor = &runningCompactor{}
compactors[key] = compactor
}
ctx, cancel := context.WithCancel(context.Background())
compactor.interval = interval
compactor.cancel = cancel
compactor.client = compactorClient
etcd3.StartCompactor(ctx, compactorClient, interval)
}
compactors[key].refs++
return func() {
lock.Lock()
defer lock.Unlock()
compactor := compactors[key]
compactor.refs--
if compactor.refs == 0 {
compactor.cancel()
compactor.client.Close()
delete(compactors, key)
}
}, nil
}
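Since compactors are keyed and ref-counted per TransportConfig, two storage backends that share a transport share a single compaction goroutine, and it only stops once every returned destroy func has run. An in-package sketch of that behavior (it calls the unexported startCompactorOnce, so it assumes this file's existing imports; tc is a hypothetical TransportConfig):

// exampleSharedCompactor demonstrates the ref-counting: both callers must
// release their reference before the shared compactor is torn down.
func exampleSharedCompactor(tc storagebackend.TransportConfig) error {
	// First caller starts the compactor for this transport.
	stopA, err := startCompactorOnce(tc, 5*time.Minute)
	if err != nil {
		return err
	}
	// Second caller with a smaller interval replaces the running compactor
	// but reuses the same map entry, so refs goes to 2.
	stopB, err := startCompactorOnce(tc, time.Minute)
	if err != nil {
		stopA()
		return err
	}

	stopA() // refs drops to 1; compaction keeps running
	stopB() // last reference released; the compactor is cancelled and its client closed
	return nil
}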
func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
client, err := newETCD3Client(c)
stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithCancel(context.Background())
etcd3.StartCompactor(ctx, client, c.CompactionInterval)
client, err := newETCD3Client(c.Transport)
if err != nil {
stopCompactor()
return nil, nil, err
}
var once sync.Once
destroyFunc := func() {
cancel()
client.Close()
// we know that storage destroy funcs are called multiple times (due to reuse in subresources).
// Hence, we only destroy once.
// TODO: fix duplicated storage destroy calls at a higher level
once.Do(func() {
stopCompactor()
client.Close()
})
}
transformer := c.Transformer
if transformer == nil {