implement ippool
1. Support VLAN IPPool management. 2. Support Calico IPPool management. Signed-off-by: Duan Jiong <djduanjiong@gmail.com>
This commit is contained in:
354
pkg/controller/network/ippool/ippool_controller.go
Normal file
354
pkg/controller/network/ippool/ippool_controller.go
Normal file
@@ -0,0 +1,354 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ippool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
|
||||
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
networkInformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/utils"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
var (
	// ErrCIDROverlap is returned by checkIPPool when a pool's CIDR
	// overlaps the CIDR of an existing pool carrying the same pool ID.
	ErrCIDROverlap = fmt.Errorf("CIDR is overlap")
)
|
||||
|
||||
// IPPoolController reconciles IPPool resources: it validates newly
// created pools, manages their protection finalizer and bookkeeping
// labels, and keeps their usage statistics in sync with the backing
// IPAM provider.
type IPPoolController struct {
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// provider performs the actual pool CRUD against the IPAM backend
	// (VLAN or Calico, per the commit description).
	provider ippool.Provider

	ippoolInformer networkInformer.IPPoolInformer
	ippoolSynced   cache.InformerSynced
	ippoolQueue    workqueue.RateLimitingInterface

	// IPAMBlock events are watched only to refresh pool status.
	ipamblockInformer networkInformer.IPAMBlockInformer
	ipamblockSynced   cache.InformerSynced

	client           clientset.Interface
	kubesphereClient kubesphereclient.Interface

	options ippool.Options
}
|
||||
|
||||
func (c *IPPoolController) ippoolHandle(obj interface{}) {
|
||||
pool, ok := obj.(*networkv1alpha1.IPPool)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("IPPool informer returned non-ippool object: %#v", obj))
|
||||
return
|
||||
}
|
||||
key, err := cache.MetaNamespaceKeyFunc(pool)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("couldn't get key for ippool %#v: %v", pool, err))
|
||||
return
|
||||
}
|
||||
|
||||
if utils.NeedToAddFinalizer(pool, networkv1alpha1.IPPoolFinalizer) || utils.IsDeletionCandidate(pool, networkv1alpha1.IPPoolFinalizer) {
|
||||
c.ippoolQueue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *IPPoolController) addFinalizer(pool *networkv1alpha1.IPPool) error {
|
||||
clone := pool.DeepCopy()
|
||||
controllerutil.AddFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
|
||||
clone.Labels = map[string]string{
|
||||
networkv1alpha1.IPPoolNameLabel: clone.Name,
|
||||
networkv1alpha1.IPPoolTypeLabel: clone.Spec.Type,
|
||||
networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", clone.ID()),
|
||||
}
|
||||
pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Error adding finalizer to pool %s: %v", pool.Name, err)
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("Added finalizer to pool %s", pool.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *IPPoolController) removeFinalizer(pool *networkv1alpha1.IPPool) error {
|
||||
clone := pool.DeepCopy()
|
||||
controllerutil.RemoveFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
|
||||
pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
|
||||
if err != nil {
|
||||
klog.V(3).Infof("Error removing finalizer from pool %s: %v", pool.Name, err)
|
||||
return err
|
||||
}
|
||||
klog.V(3).Infof("Removed protection finalizer from pool %s", pool.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// check cidr overlap
|
||||
func (c *IPPoolController) checkIPPool(pool *networkv1alpha1.IPPool) (bool, error) {
|
||||
_, poolCIDR, err := cnet.ParseCIDR(pool.Spec.CIDR)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set{
|
||||
networkv1alpha1.IPPoolIDLabel: fmt.Sprintf("%d", pool.ID()),
|
||||
}).String(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, p := range pools.Items {
|
||||
_, cidr, err := cnet.ParseCIDR(p.Spec.CIDR)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if cidr.IsNetOverlap(poolCIDR.IPNet) {
|
||||
return false, ErrCIDROverlap
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
|
||||
if old.Spec.Disabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
clone := old.DeepCopy()
|
||||
clone.Spec.Disabled = true
|
||||
|
||||
old, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *IPPoolController) updateIPPoolStatus(old *networkv1alpha1.IPPool) error {
|
||||
new, err := c.provider.GetIPPoolStats(old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(old.Status, new.Status) {
|
||||
return nil
|
||||
}
|
||||
|
||||
clone := old.DeepCopy()
|
||||
clone.Status = new.Status
|
||||
old, err = c.kubesphereClient.NetworkV1alpha1().IPPools().Update(clone)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *IPPoolController) processIPPool(name string) (*time.Duration, error) {
|
||||
klog.V(4).Infof("Processing IPPool %s", name)
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
klog.V(4).Infof("Finished processing IPPool %s (%v)", name, time.Since(startTime))
|
||||
}()
|
||||
|
||||
pool, err := c.ippoolInformer.Lister().Get(name)
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if utils.IsDeletionCandidate(pool, networkv1alpha1.IPPoolFinalizer) {
|
||||
err = c.disableIPPool(pool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Pool should be deleted. Check if it's used and remove finalizer if
|
||||
// it's not.
|
||||
canDelete, err := c.provider.DeleteIPPool(pool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if canDelete {
|
||||
return nil, c.removeFinalizer(pool)
|
||||
}
|
||||
|
||||
//The ippool is being used, update status and try again later.
|
||||
delay := time.Second * 3
|
||||
return &delay, c.updateIPPoolStatus(pool)
|
||||
}
|
||||
|
||||
if utils.NeedToAddFinalizer(pool, networkv1alpha1.IPPoolFinalizer) {
|
||||
valid, err := c.checkIPPool(pool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err = c.addFinalizer(pool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = c.provider.CreateIPPool(pool)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Provider failed to create IPPool %s, err=%v", pool.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, c.updateIPPoolStatus(pool)
|
||||
}
|
||||
|
||||
err = c.provider.UpdateIPPool(pool)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Provider failed to update IPPool %s, err=%v", pool.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, c.updateIPPoolStatus(pool)
|
||||
}
|
||||
|
||||
// Start launches the provider's status syncer in the background and then
// runs the controller with five workers until stopCh is closed.
func (c *IPPoolController) Start(stopCh <-chan struct{}) error {
	go c.provider.SyncStatus(stopCh, c.ippoolQueue)
	return c.Run(5, stopCh)
}
|
||||
|
||||
func (c *IPPoolController) Run(workers int, stopCh <-chan struct{}) error {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer c.ippoolQueue.ShutDown()
|
||||
|
||||
klog.Info("starting ippool controller")
|
||||
defer klog.Info("shutting down ippool controller")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, c.ippoolSynced, c.ipamblockSynced) {
|
||||
return fmt.Errorf("failed to wait for caches to sync")
|
||||
}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(c.runWorker, time.Second, stopCh)
|
||||
}
|
||||
|
||||
<-stopCh
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *IPPoolController) runWorker() {
|
||||
for c.processIPPoolItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *IPPoolController) processIPPoolItem() bool {
|
||||
key, quit := c.ippoolQueue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer c.ippoolQueue.Done(key)
|
||||
|
||||
_, name, err := cache.SplitMetaNamespaceKey(key.(string))
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("error parsing ippool key %q: %v", key, err))
|
||||
return true
|
||||
}
|
||||
|
||||
delay, err := c.processIPPool(name)
|
||||
if err == nil {
|
||||
c.ippoolQueue.Forget(key)
|
||||
return true
|
||||
} else if delay != nil {
|
||||
c.ippoolQueue.AddAfter(key, *delay)
|
||||
}
|
||||
|
||||
utilruntime.HandleError(fmt.Errorf("error processing ippool %v (will retry): %v", key, err))
|
||||
c.ippoolQueue.AddRateLimited(key)
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *IPPoolController) ipamblockHandle(obj interface{}) {
|
||||
block, ok := obj.(*networkv1alpha1.IPAMBlock)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
poolName := block.Labels[networkv1alpha1.IPPoolNameLabel]
|
||||
c.ippoolQueue.Add(poolName)
|
||||
}
|
||||
|
||||
func NewIPPoolController(
|
||||
ippoolInformer networkInformer.IPPoolInformer,
|
||||
ipamblockInformer networkInformer.IPAMBlockInformer,
|
||||
client clientset.Interface,
|
||||
kubesphereClient kubesphereclient.Interface,
|
||||
options ippool.Options,
|
||||
provider ippool.Provider) *IPPoolController {
|
||||
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartLogging(func(format string, args ...interface{}) {
|
||||
klog.Info(fmt.Sprintf(format, args))
|
||||
})
|
||||
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
|
||||
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"})
|
||||
|
||||
c := &IPPoolController{
|
||||
eventBroadcaster: broadcaster,
|
||||
eventRecorder: recorder,
|
||||
ippoolInformer: ippoolInformer,
|
||||
ippoolSynced: ippoolInformer.Informer().HasSynced,
|
||||
ippoolQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool"),
|
||||
ipamblockInformer: ipamblockInformer,
|
||||
ipamblockSynced: ipamblockInformer.Informer().HasSynced,
|
||||
client: client,
|
||||
kubesphereClient: kubesphereClient,
|
||||
options: options,
|
||||
provider: provider,
|
||||
}
|
||||
|
||||
ippoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.ippoolHandle,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
c.ippoolHandle(new)
|
||||
},
|
||||
})
|
||||
|
||||
//just for update ippool status
|
||||
ipamblockInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.ipamblockHandle,
|
||||
UpdateFunc: func(old, new interface{}) {
|
||||
c.ipamblockHandle(new)
|
||||
},
|
||||
DeleteFunc: c.ipamblockHandle,
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
124
pkg/controller/network/ippool/ippool_controller_test.go
Normal file
124
pkg/controller/network/ippool/ippool_controller_test.go
Normal file
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ippool
|
||||
|
||||
import (
|
||||
"flag"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
k8sfake "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
|
||||
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/utils"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/network/ippool/ipam"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIPPoolSuit is the entry point for the Ginkgo suite: it enables
// verbose klog output, routes it to the Ginkgo writer, and runs all
// specs in this package.
func TestIPPoolSuit(t *testing.T) {
	klog.InitFlags(nil)
	// flag.Set errors are ignored: these flags are registered by
	// klog.InitFlags above, so the lookups cannot fail.
	flag.Set("logtostderr", "true")
	flag.Set("v", "4")
	flag.Parse()
	klog.SetOutput(GinkgoWriter)
	RegisterFailHandler(Fail)
	RunSpecs(t, "IPPool Suite")
}
|
||||
|
||||
var _ = Describe("test ippool", func() {
|
||||
pool := &v1alpha1.IPPool{
|
||||
TypeMeta: v1.TypeMeta{},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "testippool",
|
||||
},
|
||||
Spec: v1alpha1.IPPoolSpec{
|
||||
Type: v1alpha1.VLAN,
|
||||
CIDR: "192.168.0.0/24",
|
||||
BlockSize: 24,
|
||||
},
|
||||
Status: v1alpha1.IPPoolStatus{},
|
||||
}
|
||||
|
||||
ksclient := ksfake.NewSimpleClientset()
|
||||
k8sclinet := k8sfake.NewSimpleClientset()
|
||||
options := ippool.Options{}
|
||||
p := ippool.NewProvider(ksclient, options)
|
||||
ipamClient := ipam.NewIPAMClient(ksclient, v1alpha1.VLAN)
|
||||
|
||||
ksInformer := ksinformers.NewSharedInformerFactory(ksclient, 0)
|
||||
ippoolInformer := ksInformer.Network().V1alpha1().IPPools()
|
||||
ipamblockInformer := ksInformer.Network().V1alpha1().IPAMBlocks()
|
||||
c := NewIPPoolController(ippoolInformer, ipamblockInformer, k8sclinet, ksclient, options, p)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
go ksInformer.Start(stopCh)
|
||||
go c.Start(stopCh)
|
||||
|
||||
It("test create ippool", func() {
|
||||
_, err := ksclient.NetworkV1alpha1().IPPools().Create(pool)
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
Eventually(func() bool {
|
||||
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
|
||||
if len(result.Labels) != 3 {
|
||||
return false
|
||||
}
|
||||
|
||||
if utils.NeedToAddFinalizer(result, v1alpha1.IPPoolFinalizer) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("test ippool stats", func() {
|
||||
ipamClient.AutoAssign(ipam.AutoAssignArgs{
|
||||
HandleID: "testhandle",
|
||||
Attrs: nil,
|
||||
Pool: "testippool",
|
||||
})
|
||||
|
||||
Eventually(func() bool {
|
||||
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
|
||||
if result.Status.Allocations != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("test delete pool", func() {
|
||||
ipamClient.ReleaseByHandle("testhandle")
|
||||
Eventually(func() bool {
|
||||
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(pool.Name, v1.GetOptions{})
|
||||
if result.Status.Allocations != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}).Should(Equal(true))
|
||||
|
||||
err := ksclient.NetworkV1alpha1().IPPools().Delete(pool.Name, &v1.DeleteOptions{})
|
||||
Expect(err).ShouldNot(HaveOccurred())
|
||||
blocks, _ := ksclient.NetworkV1alpha1().IPAMBlocks().List(v1.ListOptions{})
|
||||
Expect(len(blocks.Items)).Should(Equal(0))
|
||||
})
|
||||
})
|
||||
@@ -18,6 +18,7 @@ package nsnetworkpolicy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/types"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -43,8 +44,7 @@ import (
|
||||
nspolicy "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
|
||||
workspace "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/provider"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider"
|
||||
options "kubesphere.io/kubesphere/pkg/simple/client/network"
|
||||
)
|
||||
|
||||
@@ -62,7 +62,7 @@ const (
|
||||
|
||||
NodeNSNPAnnotationKey = "kubesphere.io/snat-node-ips"
|
||||
|
||||
AnnotationNPNAME = network.NSNPPrefix + "network-isolate"
|
||||
AnnotationNPNAME = types.NSNPPrefix + "network-isolate"
|
||||
|
||||
//TODO: configure it
|
||||
DNSLocalIP = "169.254.25.10"
|
||||
@@ -222,7 +222,7 @@ func (c *NSNetworkPolicyController) convertPeer(peer v1alpha1.NetworkPolicyPeer,
|
||||
func (c *NSNetworkPolicyController) convertToK8sNP(n *v1alpha1.NamespaceNetworkPolicy) (*netv1.NetworkPolicy, error) {
|
||||
np := &netv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: network.NSNPPrefix + n.Name,
|
||||
Name: types.NSNPPrefix + n.Name,
|
||||
Namespace: n.Namespace,
|
||||
},
|
||||
Spec: netv1.NetworkPolicySpec{
|
||||
@@ -564,7 +564,7 @@ func (c *NSNetworkPolicyController) syncNSNP(key string) error {
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.V(4).Infof("NSNP %v has been deleted", key)
|
||||
c.provider.Delete(c.provider.GetKey(network.NSNPPrefix+name, namespace))
|
||||
c.provider.Delete(c.provider.GetKey(types.NSNPPrefix+name, namespace))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
nsnppolicyinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
|
||||
workspaceinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/provider"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider"
|
||||
options "kubesphere.io/kubesphere/pkg/simple/client/network"
|
||||
)
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ package provider
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network/types"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -35,7 +36,6 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/controller/network"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -246,7 +246,7 @@ func NewNsNetworkPolicyProvider(client kubernetes.Interface, npInformer informer
|
||||
// Filter in only objects that are written by policy controller.
|
||||
m := make(map[string]interface{})
|
||||
for _, policy := range policies {
|
||||
if strings.HasPrefix(policy.Name, network.NSNPPrefix) {
|
||||
if strings.HasPrefix(policy.Name, types.NSNPPrefix) {
|
||||
policy.ObjectMeta = metav1.ObjectMeta{Name: policy.Name, Namespace: policy.Namespace}
|
||||
k := c.GetKey(policy.Name, policy.Namespace)
|
||||
m[k] = *policy
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
package types
|
||||
|
||||
const (
|
||||
NSNPPrefix = "nsnp-"
|
||||
71
pkg/controller/network/utils/utils.go
Normal file
71
pkg/controller/network/utils/utils.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// IsDeletionCandidate checks if object is candidate to be deleted
|
||||
func IsDeletionCandidate(obj metav1.Object, finalizer string) bool {
|
||||
return obj.GetDeletionTimestamp() != nil && ContainsString(obj.GetFinalizers(),
|
||||
finalizer, nil)
|
||||
}
|
||||
|
||||
// NeedToAddFinalizer checks if need to add finalizer to object
|
||||
func NeedToAddFinalizer(obj metav1.Object, finalizer string) bool {
|
||||
return obj.GetDeletionTimestamp() == nil && !ContainsString(obj.GetFinalizers(),
|
||||
finalizer, nil)
|
||||
}
|
||||
|
||||
// CopyStrings copies the contents of the specified string slice
|
||||
// into a new slice.
|
||||
func CopyStrings(s []string) []string {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
c := make([]string, len(s))
|
||||
copy(c, s)
|
||||
return c
|
||||
}
|
||||
|
||||
// SortStrings sorts the specified string slice in place. It returns the
// same slice that was provided in order to facilitate method chaining.
func SortStrings(s []string) []string {
	sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
	return s
}
|
||||
|
||||
// ContainsString checks if a given slice of strings contains the
// provided string. If a modifier func is provided, it is also applied to
// each slice item before the comparison.
func ContainsString(slice []string, s string, modifier func(s string) string) bool {
	for _, item := range slice {
		switch {
		case item == s:
			return true
		case modifier != nil && modifier(item) == s:
			return true
		}
	}
	return false
}
|
||||
|
||||
// RemoveString returns a newly created []string that keeps every item of
// slice that equals neither s nor, when a modifier func is provided,
// modifier(item) == s. An empty result is returned as nil so unit tests
// need not distinguish empty arrays from nil.
func RemoveString(slice []string, s string, modifier func(s string) string) []string {
	var kept []string
	for _, item := range slice {
		if item == s || (modifier != nil && modifier(item) == s) {
			continue
		}
		kept = append(kept, item)
	}
	// kept stays nil when nothing was appended, matching the documented
	// empty-result sanitization.
	return kept
}
|
||||
Reference in New Issue
Block a user