feat: kubesphere 4.0 (#6115)

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

* feat: kubesphere 4.0

Signed-off-by: ci-bot <ci-bot@kubesphere.io>

---------

Signed-off-by: ci-bot <ci-bot@kubesphere.io>
Co-authored-by: ks-ci-bot <ks-ci-bot@example.com>
Co-authored-by: joyceliu <joyceliu@yunify.com>
KubeSphere CI Bot authored 2024-09-06 11:05:52 +08:00, committed by GitHub
parent b5015ec7b9
commit 447a51f08b
8557 changed files with 546695 additions and 1146174 deletions

View File

@@ -2,7 +2,6 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k

View File

@@ -18,7 +18,10 @@ package registry
import (
"context"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
@@ -72,19 +75,30 @@ func (s *DryRunnableStorage) GuaranteedUpdate(
ctx context.Context, key string, destination runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {
if dryRun {
err := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, destination)
var current runtime.Object
v, err := conversion.EnforcePtr(destination)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
current = u.NewEmptyInstance()
} else {
current = reflect.New(v.Type()).Interface().(runtime.Object)
}
err = s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, current)
if err != nil {
return err
}
err = preconditions.Check(key, destination)
err = preconditions.Check(key, current)
if err != nil {
return err
}
rev, err := s.Versioner().ObjectResourceVersion(destination)
rev, err := s.Versioner().ObjectResourceVersion(current)
if err != nil {
return err
}
updated, _, err := tryUpdate(destination, storage.ResponseMeta{ResourceVersion: rev})
updated, _, err := tryUpdate(current, storage.ResponseMeta{ResourceVersion: rev})
if err != nil {
return err
}
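Context for the hunk above, not part of the diff: the dry-run path no longer reads the stored object into the caller's destination. It allocates a fresh "current" object of the same concrete type, so destination is only written by tryUpdate. A standalone sketch of that allocation pattern (the helper name is hypothetical; EnforcePtr and NewEmptyInstance are the same apimachinery calls used in the hunk):

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
)

// newEmptyLike returns a new, empty object with the same concrete type as obj.
// Unstructured objects keep their apiVersion/kind via NewEmptyInstance; typed
// objects are allocated with reflect.New.
func newEmptyLike(obj runtime.Object) (runtime.Object, error) {
	v, err := conversion.EnforcePtr(obj)
	if err != nil {
		return nil, fmt.Errorf("unable to convert output object to pointer: %v", err)
	}
	if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
		return u.NewEmptyInstance(), nil
	}
	return reflect.New(v.Type()).Interface().(runtime.Object), nil
}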

View File

@@ -44,7 +44,7 @@ func StorageWithCacher() generic.StorageDecorator {
triggerFuncs storage.IndexerFuncs,
indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) {
s, d, err := generic.NewRawStorage(storageConfig, newFunc)
s, d, err := generic.NewRawStorage(storageConfig, newFunc, newListFunc, resourcePrefix)
if err != nil {
return s, d, err
}

View File

@@ -25,6 +25,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
@@ -110,6 +112,9 @@ type Store struct {
// See qualifiedResourceFromContext for details.
DefaultQualifiedResource schema.GroupResource
// SingularQualifiedResource is the singular name of the resource.
SingularQualifiedResource schema.GroupResource
// KeyRootFunc returns the root etcd key for this resource; should not
// include trailing "/". This is used for operations that work on the
// entire collection (listing and watching).
@@ -229,6 +234,8 @@ var _ rest.StandardStorage = &Store{}
var _ rest.TableConvertor = &Store{}
var _ GenericStore = &Store{}
var _ rest.SingularNameProvider = &Store{}
const (
OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
resourceCountPollPeriodJitter = 1.2
@@ -359,6 +366,16 @@ func (e *Store) ListPredicate(ctx context.Context, p storage.SelectionPredicate,
Predicate: p,
Recursive: true,
}
// if we're not already namespace-scoped, see if the field selector narrows the scope of the list
if requestNamespace, _ := genericapirequest.NamespaceFrom(ctx); len(requestNamespace) == 0 {
if selectorNamespace, ok := p.MatchesSingleNamespace(); ok {
if len(validation.ValidateNamespaceName(selectorNamespace, false)) == 0 {
ctx = genericapirequest.WithNamespace(ctx, selectorNamespace)
}
}
}
if name, ok := p.MatchesSingle(); ok {
if key, err := e.KeyFunc(ctx, name); err == nil {
storageOpts.Recursive = false
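Not part of the diff: the new block above lets a cluster-scoped request whose field selector pins metadata.namespace to a single valid namespace be re-scoped to that namespace, so storage only has to scan one key prefix. A minimal client-go sketch of the kind of request that benefits (clientset, ctx, and the namespace are illustrative assumptions; metav1 and fields are the usual apimachinery packages):

pods, err := clientset.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
	// A selector that matches exactly one namespace lets the apiserver treat
	// this cluster-wide list as if it were namespaced.
	FieldSelector: fields.OneTermEqualSelector("metadata.namespace", "kube-system").String(),
})
if err != nil {
	return err
}
fmt.Printf("listed %d pods without scanning other namespaces\n", len(pods.Items))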
@@ -655,6 +672,15 @@ func (e *Store) Update(ctx context.Context, name string, objInfo rest.UpdatedObj
if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
return nil, nil, err
}
// Ignore changes that only affect managed fields timestamps.
// FieldManager can't know about changes like normalized fields, defaulted
// fields and other mutations.
obj, err = fieldmanager.IgnoreManagedFieldsTimestampsTransformer(ctx, obj, existing)
if err != nil {
return nil, nil, err
}
// at this point we have a fully formed object. It is time to call the validators that the apiserver
// handling chain wants to enforce.
if updateValidation != nil {
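Not part of the diff: the transformer call added above exists because each managed-fields entry carries a per-manager timestamp, and an update that changes nothing except those timestamps should not be persisted as a new revision. A hypothetical sketch of the underlying idea, not the actual fieldmanager implementation:

import (
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
)

// dropTimestampOnlyManagedFieldsChanges compares the incoming and stored
// objects with managedFields set aside; if nothing else changed, it copies the
// stored managedFields (and their timestamps) back onto the incoming object so
// the update becomes a genuine no-op.
func dropTimestampOnlyManagedFieldsChanges(newObj, oldObj runtime.Object) error {
	newMeta, err := meta.Accessor(newObj)
	if err != nil {
		return err
	}
	oldMeta, err := meta.Accessor(oldObj)
	if err != nil {
		return err
	}
	newCopy := newObj.DeepCopyObject()
	oldCopy := oldObj.DeepCopyObject()
	if a, err := meta.Accessor(newCopy); err == nil {
		a.SetManagedFields(nil)
	}
	if a, err := meta.Accessor(oldCopy); err == nil {
		a.SetManagedFields(nil)
	}
	if apiequality.Semantic.DeepEqual(newCopy, oldCopy) {
		newMeta.SetManagedFields(oldMeta.GetManagedFields())
	}
	return nil
}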
@@ -1117,16 +1143,16 @@ func (e *Store) DeleteReturnsDeletedObject() bool {
return e.ReturnDeletedObject
}
// deleteCollectionPageSize is the size of the page used when
// listing objects from storage during DeleteCollection calls.
// It's a variable to allow overwriting in tests.
var deleteCollectionPageSize = int64(10000)
// DeleteCollection removes all items returned by List with a given ListOptions from storage.
//
// DeleteCollection is currently NOT atomic. It can happen that only a subset of objects
// will be deleted from storage, and then an error will be returned.
// In case of success, the list of deleted objects will be returned.
//
// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we
// are removing all objects of a given type) with the current API (it's technically
// possible with the storage API, but watch is not delivered correctly then).
// It will be possible to fix it with v3 etcd API.
func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
if listOptions == nil {
listOptions = &metainternalversion.ListOptions{}
@@ -1134,51 +1160,24 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
listOptions = listOptions.DeepCopy()
}
listObj, err := e.List(ctx, listOptions)
if err != nil {
return nil, err
}
items, err := meta.ExtractList(listObj)
if err != nil {
return nil, err
}
if len(items) == 0 {
// Nothing to delete, return now
return listObj, nil
}
// Spawn a number of goroutines, so that we can issue requests to storage
// in parallel to speed up deletion.
// It is proportional to the number of items to delete, up to
// DeleteCollectionWorkers (it doesn't make much sense to spawn 16
// workers to delete 10 items).
var items []runtime.Object
// TODO(wojtek-t): Decide if we don't want to start workers more opportunistically.
workersNumber := e.DeleteCollectionWorkers
if workersNumber > len(items) {
workersNumber = len(items)
}
if workersNumber < 1 {
workersNumber = 1
}
wg := sync.WaitGroup{}
toProcess := make(chan int, 2*workersNumber)
// Ensure that chanSize is not too high (to avoid wasted work) but
// at the same time high enough to start listing before we process
// the whole page.
chanSize := 2 * workersNumber
if chanSize < 256 {
chanSize = 256
}
toProcess := make(chan runtime.Object, chanSize)
errs := make(chan error, workersNumber+1)
workersExited := make(chan struct{})
distributorExited := make(chan struct{})
go func() {
defer utilruntime.HandleCrash(func(panicReason interface{}) {
errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason)
})
defer close(distributorExited)
for i := 0; i < len(items); i++ {
select {
case toProcess <- i:
case <-workersExited:
klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "finished", i, "total", len(items))
return
}
}
close(toProcess)
}()
wg.Add(workersNumber)
for i := 0; i < workersNumber; i++ {
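Not part of the diff: the sizing logic above changes because the new code no longer lists everything up front, so it can no longer cap the worker count at the number of items; it only clamps it to at least one, and buffers the channel to at least 256 entries so the paginated lister can stay ahead of the workers. The same arithmetic, extracted into a hypothetical helper:

// sizeDeleteCollection mirrors the sizing above: at least one worker, and a
// channel large enough (>= 256) that listing the next page is not blocked on
// the workers draining the current one.
func sizeDeleteCollection(configuredWorkers int) (workers, chanSize int) {
	workers = configuredWorkers
	if workers < 1 {
		workers = 1
	}
	chanSize = 2 * workers
	if chanSize < 256 {
		chanSize = 256
	}
	return workers, chanSize
}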
@@ -1189,8 +1188,8 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
})
defer wg.Done()
for index := range toProcess {
accessor, err := meta.Accessor(items[index])
for item := range toProcess {
accessor, err := meta.Accessor(item)
if err != nil {
errs <- err
return
@@ -1207,10 +1206,93 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
}
}()
}
wg.Wait()
// notify distributor to exit
close(workersExited)
<-distributorExited
// Once all workers exit, notify the distributor.
go func() {
defer utilruntime.HandleCrash(func(panicReason interface{}) {
errs <- fmt.Errorf("DeleteCollection workers closer panicked: %v", panicReason)
})
wg.Wait()
close(workersExited)
}()
hasLimit := listOptions.Limit > 0
if listOptions.Limit == 0 {
listOptions.Limit = deleteCollectionPageSize
}
// Paginate the list request and throw all items into workers.
listObj, err := func() (runtime.Object, error) {
defer close(toProcess)
processedItems := 0
var originalList runtime.Object
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
listObj, err := e.List(ctx, listOptions)
if err != nil {
return nil, err
}
newItems, err := meta.ExtractList(listObj)
if err != nil {
return nil, err
}
items = append(items, newItems...)
for i := 0; i < len(newItems); i++ {
select {
case toProcess <- newItems[i]:
case <-workersExited:
klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "queued/finished", i, "total", processedItems+len(newItems))
// Try to propagate an error from the workers if possible.
select {
case err := <-errs:
return nil, err
default:
return nil, fmt.Errorf("all DeleteCollection workers exited")
}
}
}
processedItems += len(newItems)
// If the original request set a limit, finish after running it.
if hasLimit {
return listObj, nil
}
if originalList == nil {
originalList = listObj
meta.SetList(originalList, nil)
}
// If there are no more items, return the list.
m, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
if len(m.GetContinue()) == 0 {
meta.SetList(originalList, items)
return originalList, nil
}
// Set up the next loop.
listOptions.Continue = m.GetContinue()
listOptions.ResourceVersion = ""
listOptions.ResourceVersionMatch = ""
}
}()
if err != nil {
return nil, err
}
// Wait for all workers to exit.
<-workersExited
select {
case err := <-errs:
return nil, err
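Not part of the diff: taken together, the rewritten DeleteCollection above streams pages of results into the worker channel instead of materializing the whole collection first, following the continue token until the server has nothing more to return. The same pagination pattern from the client side, as a hedged client-go sketch (the clientset, page size, and pod types are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// forEachPodPage lists pods in fixed-size pages and follows metadata.continue
// until the server reports there is nothing left, calling visit for each item.
func forEachPodPage(ctx context.Context, client kubernetes.Interface, visit func(*corev1.Pod)) error {
	opts := metav1.ListOptions{Limit: 500}
	for {
		page, err := client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
		if err != nil {
			return err
		}
		for i := range page.Items {
			visit(&page.Items[i])
		}
		if page.Continue == "" {
			return nil // last page
		}
		// The continue token records where the previous page stopped.
		opts.Continue = page.Continue
	}
}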
@@ -1268,12 +1350,21 @@ func (e *Store) Watch(ctx context.Context, options *metainternalversion.ListOpti
resourceVersion = options.ResourceVersion
predicate.AllowWatchBookmarks = options.AllowWatchBookmarks
}
return e.WatchPredicate(ctx, predicate, resourceVersion)
return e.WatchPredicate(ctx, predicate, resourceVersion, options.SendInitialEvents)
}
// WatchPredicate starts a watch for the items that match.
func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {
storageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p, Recursive: true}
func (e *Store) WatchPredicate(ctx context.Context, p storage.SelectionPredicate, resourceVersion string, sendInitialEvents *bool) (watch.Interface, error) {
storageOpts := storage.ListOptions{ResourceVersion: resourceVersion, Predicate: p, Recursive: true, SendInitialEvents: sendInitialEvents}
// if we're not already namespace-scoped, see if the field selector narrows the scope of the watch
if requestNamespace, _ := genericapirequest.NamespaceFrom(ctx); len(requestNamespace) == 0 {
if selectorNamespace, ok := p.MatchesSingleNamespace(); ok {
if len(validation.ValidateNamespaceName(selectorNamespace, false)) == 0 {
ctx = genericapirequest.WithNamespace(ctx, selectorNamespace)
}
}
}
key := e.KeyRootFunc(ctx)
if name, ok := p.MatchesSingle(); ok {
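Not part of the diff: WatchPredicate now forwards options.SendInitialEvents into storage.ListOptions, which is the server-side plumbing for the streaming-list (WatchList) behaviour, where a watch first replays the current state, then a bookmark, then live changes. A hedged client-side sketch of opting in (requires the corresponding feature gates on the server; clientset and ctx are assumed to exist):

sendInitial := true
w, err := clientset.CoreV1().Pods("default").Watch(ctx, metav1.ListOptions{
	SendInitialEvents:    &sendInitial,
	AllowWatchBookmarks:  true,
	ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
})
if err != nil {
	return err
}
defer w.Stop()
for ev := range w.ResultChan() {
	// Initial state arrives as ADDED events, followed by a BOOKMARK, then live updates.
	fmt.Printf("%s %T\n", ev.Type, ev.Object)
}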
@@ -1320,6 +1411,12 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {
if e.DefaultQualifiedResource.Empty() {
return fmt.Errorf("store %#v must have a non-empty qualified resource", e)
}
if e.SingularQualifiedResource.Empty() {
return fmt.Errorf("store %#v must have a non-empty singular qualified resource", e)
}
if e.DefaultQualifiedResource.Group != e.SingularQualifiedResource.Group {
return fmt.Errorf("store for %#v, singular and plural qualified resource's group name's must match", e)
}
if e.NewFunc == nil {
return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String())
}
@@ -1515,6 +1612,10 @@ func (e *Store) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
return e.ResetFieldsStrategy.GetResetFields()
}
func (e *Store) GetSingularName() string {
return e.SingularQualifiedResource.Resource
}
// validateIndexers will check the prefix of indexers.
func validateIndexers(indexers *cache.Indexers) error {
if indexers == nil {
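Not part of the diff: with the checks added to CompleteWithOptions above, every Store must now declare a singular GroupResource in the same API group as its plural one, and GetSingularName exposes it (this backs the rest.SingularNameProvider assertion earlier in the file). A hypothetical registry snippet showing how the field is filled in; the Widget types and group name are illustrative, not from this commit:

store := &genericregistry.Store{
	NewFunc:                   func() runtime.Object { return &samplev1.Widget{} },
	NewListFunc:               func() runtime.Object { return &samplev1.WidgetList{} },
	DefaultQualifiedResource:  schema.GroupResource{Group: "sample.example.com", Resource: "widgets"},
	SingularQualifiedResource: schema.GroupResource{Group: "sample.example.com", Resource: "widget"},
	// strategy, table convertor, etc. omitted
}
// CompleteWithOptions now fails if SingularQualifiedResource is empty or in a
// different group than DefaultQualifiedResource.
if err := store.CompleteWithOptions(options); err != nil {
	return nil, err
}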

View File

@@ -47,12 +47,12 @@ func UndecoratedStorage(
getAttrsFunc storage.AttrFunc,
trigger storage.IndexerFuncs,
indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) {
return NewRawStorage(config, newFunc)
return NewRawStorage(config, newFunc, newListFunc, resourcePrefix)
}
// NewRawStorage creates the low level kv storage. This is a work-around for current
// two layer of same storage interface.
// TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method.
func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) {
return factory.Create(*config, newFunc)
func NewRawStorage(config *storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, factory.DestroyFunc, error) {
return factory.Create(*config, newFunc, newListFunc, resourcePrefix)
}
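Not part of the diff: both UndecoratedStorage and StorageWithCacher now hand newListFunc and resourcePrefix down to factory.Create, so the backend can allocate list objects itself and knows which key prefix it serves. A hedged sketch of calling the updated helper directly; the example types and prefix are illustrative stand-ins:

s, destroy, err := generic.NewRawStorage(
	storageConfig, // *storagebackend.ConfigForResource for this resource
	func() runtime.Object { return &example.Pod{} },     // newFunc: single objects
	func() runtime.Object { return &example.PodList{} }, // newListFunc: list objects
	"pods", // resourcePrefix: key prefix this storage serves
)
if err != nil {
	return nil, err
}
defer destroy()
_ = s // storage.Interface, ready to be wrapped by a cacher or used directly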