update dependencies (#6267)
Signed-off-by: hongming <coder.scala@gmail.com>
vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS (generated, vendored): 3 changed lines
@@ -2,3 +2,6 @@
 reviewers:
   - wojtek-t
+  - serathius
+labels:
+  - sig/etcd
vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go (generated, vendored): 13 changed lines
@@ -84,7 +84,7 @@ var (
 		},
 		[]string{"endpoint"},
 	)
-	storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "")
+	storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.STABLE, "")
 	storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }}
 	etcdEventsReceivedCounts = compbasemetrics.NewCounterVec(
 		&compbasemetrics.CounterOpts{
@@ -167,6 +167,7 @@ func Register() {
 		legacyregistry.MustRegister(objectCounts)
 		legacyregistry.MustRegister(dbTotalSize)
 		legacyregistry.CustomMustRegister(storageMonitor)
+		legacyregistry.MustRegister(etcdEventsReceivedCounts)
 		legacyregistry.MustRegister(etcdBookmarkCounts)
 		legacyregistry.MustRegister(etcdLeaseObjectCounts)
 		legacyregistry.MustRegister(listStorageCount)
@@ -287,21 +288,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric
 	}

 	for i, m := range monitors {
-		cluster := fmt.Sprintf("etcd-%d", i)
+		storageClusterID := fmt.Sprintf("etcd-%d", i)

-		klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster)
+		klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID)
 		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 		metrics, err := m.Monitor(ctx)
 		cancel()
 		m.Close()
 		if err != nil {
-			klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err)
+			klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err)
 			continue
 		}

-		metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), cluster)
+		metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID)
 		if err != nil {
-			klog.ErrorS(err, "Failed to create metric", "cluster", cluster)
+			klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID)
 		}
 		ch <- metric
 	}
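Note on the metrics.go hunks above: the database-size gauge keeps its name, but its label moves from "cluster" to "storage_cluster_id" and its stability is promoted from ALPHA to STABLE, and the previously unregistered etcdEventsReceivedCounts counter is now registered. A minimal, hypothetical sketch of how a sample with the renamed label looks, written against the plain Prometheus client rather than the vendored component-base wrappers:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Same metric name, help text, and (renamed) label as the descriptor in the diff.
	desc := prometheus.NewDesc(
		"apiserver_storage_size_bytes",
		"Size of the storage database file physically allocated in bytes.",
		[]string{"storage_cluster_id"}, nil)

	// The vendored collector emits one sample per etcd member, labelled "etcd-0", "etcd-1", ...
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 4*1024*1024, "etcd-0")

	var sample dto.Metric
	if err := m.Write(&sample); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%q value=%v\n", sample.Label[0].GetName(), sample.Label[0].GetValue(), sample.Gauge.GetValue())
}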
vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go (generated, vendored): 147 changed lines
@@ -38,9 +38,13 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/audit"
+	endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/storage"
 	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
+	etcdfeature "k8s.io/apiserver/pkg/storage/feature"
 	"k8s.io/apiserver/pkg/storage/value"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/tracing"
 	"k8s.io/klog/v2"
 )
@@ -138,6 +142,9 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func
 	w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) {
 		return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType)
 	}
+	if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) || utilfeature.DefaultFeatureGate.Enabled(features.WatchList) {
+		etcdfeature.DefaultFeatureSupportChecker.CheckClient(c.Ctx(), c, storage.RequestWatchProgress)
+	}
 	return s
 }
@@ -584,6 +591,52 @@ func (s *store) Count(key string) (int64, error) {
 	return getResp.Count, nil
 }

+// ReadinessCheck implements storage.Interface.
+func (s *store) ReadinessCheck() error {
+	return nil
+}
+
+// resolveGetListRev is used by GetList to resolve the rev to use in the client.KV.Get request.
+func (s *store) resolveGetListRev(continueKey string, continueRV int64, opts storage.ListOptions) (int64, error) {
+	var withRev int64
+	// Uses continueRV if this is a continuation request.
+	if len(continueKey) > 0 {
+		if len(opts.ResourceVersion) > 0 && opts.ResourceVersion != "0" {
+			return withRev, apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
+		}
+		// If continueRV > 0, the LIST request needs a specific resource version.
+		// continueRV==0 is invalid.
+		// If continueRV < 0, the request is for the latest resource version.
+		if continueRV > 0 {
+			withRev = continueRV
+		}
+		return withRev, nil
+	}
+	// Returns 0 if ResourceVersion is not specified.
+	if len(opts.ResourceVersion) == 0 {
+		return withRev, nil
+	}
+	parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
+	if err != nil {
+		return withRev, apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
+	}
+
+	switch opts.ResourceVersionMatch {
+	case metav1.ResourceVersionMatchNotOlderThan:
+		// The not older than constraint is checked after we get a response from etcd,
+		// and returnedRV is then set to the revision we get from the etcd response.
+	case metav1.ResourceVersionMatchExact:
+		withRev = int64(parsedRV)
+	case "": // legacy case
+		if opts.Recursive && opts.Predicate.Limit > 0 && parsedRV > 0 {
+			withRev = int64(parsedRV)
+		}
+	default:
+		return withRev, fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch)
+	}
+	return withRev, nil
+}
+
 // GetList implements storage.Interface.
 func (s *store) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
 	preparedKey, err := s.prepareKey(key)
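Note on the hunk above: resolveGetListRev centralizes how GetList maps its options to the etcd revision used for the read. A simplified standalone sketch of that decision table (illustrative only; it drops the legacy limit branch and the error paths):

package main

import "fmt"

// resolveRevSketch mirrors the rules above: continuation requests pin to the token's
// revision, an exact ResourceVersionMatch pins to the parsed revision, and everything
// else leaves the revision at 0 so the latest state is read.
func resolveRevSketch(haveContinue bool, continueRV int64, resourceVersion string, exactMatch bool, parsedRV int64) int64 {
	switch {
	case haveContinue && continueRV > 0:
		return continueRV // the continue token carries the revision to keep paging against
	case haveContinue:
		return 0 // negative continueRV means "latest revision"
	case resourceVersion == "":
		return 0 // no ResourceVersion requested: read the latest state
	case exactMatch:
		return parsedRV // ResourceVersionMatchExact: serve exactly this revision
	default:
		return 0 // NotOlderThan (and the legacy default) are validated against the response instead
	}
}

func main() {
	fmt.Println(resolveRevSketch(true, 100, "", false, 0))     // 100: continuation request
	fmt.Println(resolveRevSketch(false, 0, "200", true, 200))  // 200: exact match
	fmt.Println(resolveRevSketch(false, 0, "200", false, 200)) // 0: not-older-than, checked later
}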
@@ -636,41 +689,15 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption

 	var continueRV, withRev int64
 	var continueKey string
-	switch {
-	case opts.Recursive && len(opts.Predicate.Continue) > 0:
+	if opts.Recursive && len(opts.Predicate.Continue) > 0 {
 		continueKey, continueRV, err = storage.DecodeContinue(opts.Predicate.Continue, keyPrefix)
 		if err != nil {
 			return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
 		}
-
-		if len(opts.ResourceVersion) > 0 && opts.ResourceVersion != "0" {
-			return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
-		}
 		preparedKey = continueKey
-		// If continueRV > 0, the LIST request needs a specific resource version.
-		// continueRV==0 is invalid.
-		// If continueRV < 0, the request is for the latest resource version.
-		if continueRV > 0 {
-			withRev = continueRV
-		}
-	case len(opts.ResourceVersion) > 0:
-		parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
-		if err != nil {
-			return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
-		}
-		switch opts.ResourceVersionMatch {
-		case metav1.ResourceVersionMatchNotOlderThan:
-			// The not older than constraint is checked after we get a response from etcd,
-			// and returnedRV is then set to the revision we get from the etcd response.
-		case metav1.ResourceVersionMatchExact:
-			withRev = int64(parsedRV)
-		case "": // legacy case
-			if opts.Recursive && opts.Predicate.Limit > 0 && parsedRV > 0 {
-				withRev = int64(parsedRV)
-			}
-		default:
-			return fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch)
-		}
 	}
+	if withRev, err = s.resolveGetListRev(continueKey, continueRV, opts); err != nil {
+		return err
+	}

 	if withRev != 0 {
@@ -738,10 +765,25 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption
 			return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
 		}

-		if err := appendListItem(v, data, uint64(kv.ModRevision), opts.Predicate, s.codec, s.versioner, newItemFunc); err != nil {
+		// Check if the request has already timed out before decode object
+		select {
+		case <-ctx.Done():
+			// parent context is canceled or timed out, no point in continuing
+			return storage.NewTimeoutError(string(kv.Key), "request did not complete within requested timeout")
+		default:
+		}
+
+		obj, err := decodeListItem(ctx, data, uint64(kv.ModRevision), s.codec, s.versioner, newItemFunc)
+		if err != nil {
 			recordDecodeError(s.groupResourceString, string(kv.Key))
 			return err
 		}
+
+		// being unable to set the version does not prevent the object from being extracted
+		if matched, err := opts.Predicate.Matches(obj); err == nil && matched {
+			v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
+		}
+
 		numEvald++

 		// free kv early. Long lists can take O(seconds) to decode.
@@ -774,27 +816,11 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption
 		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
 	}

-	// instruct the client to begin querying from immediately after the last key we returned
-	// we never return a key that the client wouldn't be allowed to see
-	if hasMore {
-		// we want to start immediately after the last key
-		next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, withRev)
-		if err != nil {
-			return err
-		}
-		var remainingItemCount *int64
-		// getResp.Count counts in objects that do not match the pred.
-		// Instead of returning inaccurate count for non-empty selectors, we return nil.
-		// Only set remainingItemCount if the predicate is empty.
-		if opts.Predicate.Empty() {
-			c := int64(getResp.Count - opts.Predicate.Limit)
-			remainingItemCount = &c
-		}
-		return s.versioner.UpdateList(listObj, uint64(withRev), next, remainingItemCount)
-	}
-
-	// no continuation
-	return s.versioner.UpdateList(listObj, uint64(withRev), "", nil)
+	continueValue, remainingItemCount, err := storage.PrepareContinueToken(string(lastKey), keyPrefix, withRev, getResp.Count, hasMore, opts)
+	if err != nil {
+		return err
+	}
+	return s.versioner.UpdateList(listObj, uint64(withRev), continueValue, remainingItemCount)
 }

 // growSlice takes a slice value and grows its capacity up
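Note on the hunk above: the inline continue-token block removed from GetList is what the new storage.PrepareContinueToken call consolidates: emit a token that starts just past the last returned key only when more results remain, and report a remaining-item count only when the predicate is empty. A rough standalone sketch of that behaviour (the helper name and argument order come from the diff; the body below is an illustration, not the vendored implementation, and its token encoding is not byte-compatible):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// prepareContinueSketch mimics the removed inline logic: no token unless more results
// remain, and a remaining-item count only when the predicate matched everything.
func prepareContinueSketch(lastKey, keyPrefix string, rev, totalCount, limit int64, hasMore, predicateEmpty bool) (string, *int64, error) {
	if !hasMore {
		return "", nil, nil
	}
	// Start the next page immediately after the last key we returned.
	start := strings.TrimPrefix(lastKey, keyPrefix) + "\x00"
	token, err := json.Marshal(map[string]interface{}{"v": 1, "rv": rev, "start": start})
	if err != nil {
		return "", nil, err
	}
	var remaining *int64
	if predicateEmpty {
		c := totalCount - limit
		remaining = &c
	}
	return base64.StdEncoding.EncodeToString(token), remaining, nil
}

func main() {
	next, remaining, _ := prepareContinueSketch("/registry/pods/ns1/pod-500", "/registry/pods/", 1234, 900, 500, true, true)
	fmt.Println(next, *remaining) // an opaque token plus 400 items still to list
}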
@@ -1015,20 +1041,23 @@ func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objP
 	return nil
 }

-// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
-func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
+// decodeListItem decodes bytes value in array into object.
+func decodeListItem(ctx context.Context, data []byte, rev uint64, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) (runtime.Object, error) {
+	startedAt := time.Now()
+	defer func() {
+		endpointsrequest.TrackDecodeLatency(ctx, time.Since(startedAt))
+	}()
+
 	obj, _, err := codec.Decode(data, nil, newItemFunc())
 	if err != nil {
-		return err
+		return nil, err
 	}
-	// being unable to set the version does not prevent the object from being extracted
+
 	if err := versioner.UpdateObject(obj, rev); err != nil {
 		klog.Errorf("failed to update object version: %v", err)
 	}
-	if matched, err := pred.Matches(obj); err == nil && matched {
-		v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
-	}
-	return nil
+
+	return obj, nil
 }

 // recordDecodeError record decode error split by object type.
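Note on the hunk above: decodeListItem also starts attributing per-item decode time to the request through endpointsrequest.TrackDecodeLatency. A small sketch of the same defer-based timing pattern, with a hypothetical recording function standing in for the vendored helper:

package main

import (
	"context"
	"fmt"
	"time"
)

// recordDecodeLatency stands in for endpointsrequest.TrackDecodeLatency, which adds the
// measured duration to latency trackers carried in the request context.
func recordDecodeLatency(_ context.Context, d time.Duration) {
	fmt.Println("decode latency:", d)
}

func decodeOne(ctx context.Context, data []byte) ([]byte, error) {
	startedAt := time.Now()
	defer func() {
		recordDecodeLatency(ctx, time.Since(startedAt))
	}()
	// A real implementation would run codec.Decode here.
	return data, nil
}

func main() {
	if _, err := decodeOne(context.Background(), []byte(`{"kind":"Pod"}`)); err != nil {
		fmt.Println(err)
	}
}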
vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go (generated, vendored): 112 changed lines
@@ -46,8 +46,9 @@ import (

 const (
 	// We have set a buffer in order to reduce times of context switches.
-	incomingBufSize = 100
-	outgoingBufSize = 100
+	incomingBufSize         = 100
+	outgoingBufSize         = 100
+	processEventConcurrency = 10
 )

 // defaultWatcherMaxLimit is used to facilitate construction tests
@@ -230,8 +231,7 @@ func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bo
 	go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents)

 	var resultChanWG sync.WaitGroup
-	resultChanWG.Add(1)
-	go wc.processEvent(&resultChanWG)
+	wc.processEvents(&resultChanWG)

 	select {
 	case err := <-wc.errChan:
@@ -424,10 +424,17 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEnd
 	close(watchClosedCh)
 }

-// processEvent processes events from etcd watcher and sends results to resultChan.
-func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
+// processEvents processes events from etcd watcher and sends results to resultChan.
+func (wc *watchChan) processEvents(wg *sync.WaitGroup) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.ConcurrentWatchObjectDecode) {
+		wc.concurrentProcessEvents(wg)
+	} else {
+		wg.Add(1)
+		go wc.serialProcessEvents(wg)
+	}
+}
+
+func (wc *watchChan) serialProcessEvents(wg *sync.WaitGroup) {
 	defer wg.Done()

 	for {
 		select {
 		case e := <-wc.incomingEventChan:
@@ -435,7 +442,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
 			if res == nil {
 				continue
 			}
-			if len(wc.resultChan) == outgoingBufSize {
+			if len(wc.resultChan) == cap(wc.resultChan) {
 				klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", wc.watcher.objectType, "groupResource", wc.watcher.groupResource)
 			}
 			// If user couldn't receive results fast enough, we also block incoming events from watcher.
@@ -452,6 +459,95 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
 		}
 	}
 }

+func (wc *watchChan) concurrentProcessEvents(wg *sync.WaitGroup) {
+	p := concurrentOrderedEventProcessing{
+		input:           wc.incomingEventChan,
+		processFunc:     wc.transform,
+		output:          wc.resultChan,
+		processingQueue: make(chan chan *watch.Event, processEventConcurrency-1),
+
+		objectType:    wc.watcher.objectType,
+		groupResource: wc.watcher.groupResource,
+	}
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		p.scheduleEventProcessing(wc.ctx, wg)
+	}()
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		p.collectEventProcessing(wc.ctx)
+	}()
+}
+
+type concurrentOrderedEventProcessing struct {
+	input       chan *event
+	processFunc func(*event) *watch.Event
+	output      chan watch.Event
+
+	processingQueue chan chan *watch.Event
+	// Metadata for logging
+	objectType    string
+	groupResource schema.GroupResource
+}
+
+func (p *concurrentOrderedEventProcessing) scheduleEventProcessing(ctx context.Context, wg *sync.WaitGroup) {
+	var e *event
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case e = <-p.input:
+		}
+		processingResponse := make(chan *watch.Event, 1)
+		select {
+		case <-ctx.Done():
+			return
+		case p.processingQueue <- processingResponse:
+		}
+		wg.Add(1)
+		go func(e *event, response chan<- *watch.Event) {
+			defer wg.Done()
+			select {
+			case <-ctx.Done():
+			case response <- p.processFunc(e):
+			}
+		}(e, processingResponse)
+	}
+}
+
+func (p *concurrentOrderedEventProcessing) collectEventProcessing(ctx context.Context) {
+	var processingResponse chan *watch.Event
+	var e *watch.Event
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case processingResponse = <-p.processingQueue:
+		}
+		select {
+		case <-ctx.Done():
+			return
+		case e = <-processingResponse:
+		}
+		if e == nil {
+			continue
+		}
+		if len(p.output) == cap(p.output) {
+			klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", p.objectType, "groupResource", p.groupResource)
+		}
+		// If user couldn't receive results fast enough, we also block incoming events from watcher.
+		// Because storing events in local will cause more memory usage.
+		// The worst case would be closing the fast watcher.
+		select {
+		case <-ctx.Done():
+			return
+		case p.output <- *e:
+		}
+	}
+}
+
 func (wc *watchChan) filter(obj runtime.Object) bool {
 	if wc.internalPred.Empty() {
 		return true
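Note on the hunk above: the concurrentOrderedEventProcessing machinery, used only when the ConcurrentWatchObjectDecode feature gate is enabled, transforms up to processEventConcurrency events in parallel while preserving delivery order. The scheduler fixes the order by enqueuing a one-slot response channel per event before handing it to a worker, and the collector drains those channels in queue order. A self-contained sketch of the same pattern, with plain strings standing in for etcd events (illustrative only, not the vendored types):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// orderedConcurrent runs the transform on several goroutines but emits results in input
// order: the scheduler enqueues a one-slot response channel per item (fixing the order),
// workers fill their channel when done, and the collector drains channels in queue order.
func orderedConcurrent(in <-chan string, workers int) <-chan string {
	out := make(chan string)
	queue := make(chan chan string, workers-1) // bounds how many items are in flight
	var wg sync.WaitGroup

	// Scheduler.
	go func() {
		defer close(queue)
		for item := range in {
			resp := make(chan string, 1)
			queue <- resp
			wg.Add(1)
			go func(item string, resp chan<- string) {
				defer wg.Done()
				resp <- strings.ToUpper(item) // stand-in for the expensive decode/transform
			}(item, resp)
		}
	}()

	// Collector.
	go func() {
		defer close(out)
		for resp := range queue {
			out <- <-resp
		}
		wg.Wait()
	}()
	return out
}

func main() {
	in := make(chan string, 3)
	for _, s := range []string{"added", "modified", "deleted"} {
		in <- s
	}
	close(in)
	for s := range orderedConcurrent(in, 3) {
		fmt.Println(s) // ADDED, MODIFIED, DELETED: input order preserved
	}
}

The queue capacity of workers-1 mirrors processEventConcurrency-1 in the diff; together with the item the collector is currently waiting on, it caps the number of concurrent transforms. The vendored code additionally threads context cancellation through every send and receive so the whole pipeline shuts down with the watch.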
||||