use istio client-go library instead of knative (#1661)
use istio client-go library instead of knative
bump kubernetes dependency version
change code coverage to codecov
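Note: for context, a minimal sketch of the typed client this commit switches to (the kubeconfig path and namespace are illustrative, not from the diff). With istio.io/client-go, Istio resources are accessed through a versioned clientset, analogous to the core Kubernetes clientset, instead of the Istio clients bundled with knative:

package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"

	istioclient "istio.io/client-go/pkg/clientset/versioned"
)

func main() {
	// Illustrative kubeconfig path; in-cluster config works as well.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("failed to build rest config: %v", err)
	}

	// Versioned Istio clientset, created the same way as a Kubernetes clientset.
	ic, err := istioclient.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("failed to create istio clientset: %v", err)
	}

	// Typed access to Istio CRDs, e.g. VirtualServices in a namespace.
	vsClient := ic.NetworkingV1alpha3().VirtualServices("default")
	_ = vsClient // call Get/List/Update on it as with any typed client
}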
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go (generated, vendored, 44 lines changed)
@@ -32,7 +32,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// CacheReader is a CacheReader
+// CacheReader is a client.Reader
 var _ client.Reader = &CacheReader{}
 
 // CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type
@@ -87,23 +87,26 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O
 }
 
 // List lists items out of the indexer and writes them to out
-func (c *CacheReader) List(_ context.Context, opts *client.ListOptions, out runtime.Object) error {
+func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error {
 	var objs []interface{}
 	var err error
 
-	if opts != nil && opts.FieldSelector != nil {
+	listOpts := client.ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	if listOpts.FieldSelector != nil {
 		// TODO(directxman12): support more complicated field selectors by
-		// combining multiple indicies, GetIndexers, etc
-		field, val, requiresExact := requiresExactMatch(opts.FieldSelector)
+		// combining multiple indices, GetIndexers, etc
+		field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
 		if !requiresExact {
 			return fmt.Errorf("non-exact field matches are not supported by the cache")
 		}
 		// list all objects by the field selector. If this is namespaced and we have one, ask for the
 		// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
 		// namespace.
-		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(opts.Namespace, val))
-	} else if opts != nil && opts.Namespace != "" {
-		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, opts.Namespace)
+		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
+	} else if listOpts.Namespace != "" {
+		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
 	} else {
 		objs = c.indexer.List()
 	}
@@ -111,27 +114,19 @@ func (c *CacheReader) List(_ context.Context, opts *client.ListOptions, out runt
 		return err
 	}
 	var labelSel labels.Selector
-	if opts != nil && opts.LabelSelector != nil {
-		labelSel = opts.LabelSelector
+	if listOpts.LabelSelector != nil {
+		labelSel = listOpts.LabelSelector
 	}
 
-	outItems, err := c.getListItems(objs, labelSel)
-	if err != nil {
-		return err
-	}
-	return apimeta.SetList(out, outItems)
-}
-
-func (c *CacheReader) getListItems(objs []interface{}, labelSel labels.Selector) ([]runtime.Object, error) {
-	outItems := make([]runtime.Object, 0, len(objs))
+	runtimeObjs := make([]runtime.Object, 0, len(objs))
 	for _, item := range objs {
 		obj, isObj := item.(runtime.Object)
 		if !isObj {
-			return nil, fmt.Errorf("cache contained %T, which is not an Object", obj)
+			return fmt.Errorf("cache contained %T, which is not an Object", obj)
 		}
 		meta, err := apimeta.Accessor(obj)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if labelSel != nil {
 			lbls := labels.Set(meta.GetLabels())
@@ -139,9 +134,12 @@ func (c *CacheReader) getListItems(objs []interface{}, labelSel labels.Selector)
 				continue
 			}
 		}
-		outItems = append(outItems, obj.DeepCopyObject())
+
+		outObj := obj.DeepCopyObject()
+		outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
+		runtimeObjs = append(runtimeObjs, outObj)
 	}
-	return outItems, nil
+	return apimeta.SetList(out, runtimeObjs)
 }
 
 // objectKeyToStorageKey converts an object key to store key.
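Note: the caller-facing effect of the List change above, as a minimal sketch (the listPods helper and the label values are illustrative, not from the commit). Options move from a single *client.ListOptions argument to variadic client.ListOption values, and the output object now comes second:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPods is an illustrative helper showing the new variadic option style;
// the cache's CacheReader satisfies client.Reader, so it is called the same way.
func listPods(r client.Reader) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	// Before: r.List(ctx, &client.ListOptions{Namespace: "default"}, pods)
	// After: functional options, composed as needed.
	err := r.List(context.TODO(), pods,
		client.InNamespace("default"),
		client.MatchingLabels{"app": "example"},
	)
	return pods, err
}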
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go (generated, vendored, 2 lines changed)
@@ -73,7 +73,7 @@ func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool {
 
 // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns
 // the Informer from the map.
-func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) {
+func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
 	_, isUnstructured := obj.(*unstructured.Unstructured)
 	_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
 	isUnstructured = isUnstructured || isUnstructuredList
vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go (generated, vendored, 99 lines changed)
@@ -145,70 +145,65 @@ func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced {
 
 // Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns
 // the Informer from the map.
-func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) {
+func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
 	// Return the informer if it is found
-	i, ok := func() (*MapEntry, bool) {
+	i, started, ok := func() (*MapEntry, bool, bool) {
 		ip.mu.RLock()
 		defer ip.mu.RUnlock()
 		i, ok := ip.informersByGVK[gvk]
-		return i, ok
+		return i, ip.started, ok
 	}()
-	if ok {
-		return i, nil
+
+	if !ok {
+		var err error
+		if i, started, err = ip.addInformerToMap(gvk, obj); err != nil {
+			return started, nil, err
+		}
 	}
 
-	// Do the mutex part in its own function so we can use defer without blocking pieces that don't
-	// need to be locked
-	var sync bool
-	i, err := func() (*MapEntry, error) {
-		ip.mu.Lock()
-		defer ip.mu.Unlock()
-
-		// Check the cache to see if we already have an Informer. If we do, return the Informer.
-		// This is for the case where 2 routines tried to get the informer when it wasn't in the map
-		// so neither returned early, but the first one created it.
-		var ok bool
-		i, ok = ip.informersByGVK[gvk]
-		if ok {
-			return i, nil
-		}
-
-		// Create a NewSharedIndexInformer and add it to the map.
-		var lw *cache.ListWatch
-		lw, err := ip.createListWatcher(gvk, ip)
-		if err != nil {
-			return nil, err
-		}
-		ni := cache.NewSharedIndexInformer(lw, obj, ip.resync, cache.Indexers{
-			cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
-		})
-		i = &MapEntry{
-			Informer: ni,
-			Reader:   CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk},
-		}
-		ip.informersByGVK[gvk] = i
-
-		// Start the Informer if need by
-		// TODO(seans): write thorough tests and document what happens here - can you add indexers?
-		// can you add eventhandlers?
-		if ip.started {
-			sync = true
-			go i.Informer.Run(ip.stop)
-		}
-		return i, nil
-	}()
-	if err != nil {
-		return nil, err
-	}
-
-	if sync {
+	if started && !i.Informer.HasSynced() {
 		// Wait for it to sync before returning the Informer so that folks don't read from a stale cache.
 		if !cache.WaitForCacheSync(ip.stop, i.Informer.HasSynced) {
-			return nil, fmt.Errorf("failed waiting for %T Informer to sync", obj)
+			return started, nil, fmt.Errorf("failed waiting for %T Informer to sync", obj)
		}
 	}
 
-	return i, err
+	return started, i, nil
 }
 
+func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) {
+	ip.mu.Lock()
+	defer ip.mu.Unlock()
+
+	// Check the cache to see if we already have an Informer. If we do, return the Informer.
+	// This is for the case where 2 routines tried to get the informer when it wasn't in the map
+	// so neither returned early, but the first one created it.
+	if i, ok := ip.informersByGVK[gvk]; ok {
+		return i, ip.started, nil
+	}
+
+	// Create a NewSharedIndexInformer and add it to the map.
+	var lw *cache.ListWatch
+	lw, err := ip.createListWatcher(gvk, ip)
+	if err != nil {
+		return nil, false, err
+	}
+	ni := cache.NewSharedIndexInformer(lw, obj, ip.resync, cache.Indexers{
+		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
+	})
+	i := &MapEntry{
+		Informer: ni,
+		Reader:   CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk},
+	}
+	ip.informersByGVK[gvk] = i
+
+	// Start the Informer if need by
+	// TODO(seans): write thorough tests and document what happens here - can you add indexers?
+	// can you add eventhandlers?
+	if ip.started {
+		go i.Informer.Run(ip.stop)
+	}
+	return i, ip.started, nil
+}
+
 // newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer.
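Note: a sketch of how the new started return value might be consumed, written as if inside the same internal package (with context, fmt, runtime, schema, and client already imported). The cachedGet wrapper and its error text are illustrative, not part of this commit; the flag lets callers refuse reads from an informer whose cache was never started:

// cachedGet is an illustrative wrapper, not from the diff above.
func cachedGet(ctx context.Context, m *InformersMap, gvk schema.GroupVersionKind,
	key client.ObjectKey, out runtime.Object) error {
	started, entry, err := m.Get(gvk, out)
	if err != nil {
		return err
	}
	if !started {
		// An informer was created lazily, but the map has not been started,
		// so its cache would serve empty or stale data.
		return fmt.Errorf("informer cache for %s has not been started", gvk)
	}
	return entry.Reader.Get(ctx, key, out)
}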