network: support network isolation

Add a new CRD to convert KubeSphere network policies to Kubernetes network policies; other network
plugins then do the rest of the work.

Use cache.go from the Calico project's kube-controllers; it aims to sync NSNPs with Kubernetes NetworkPolicies, delete unused policies, and relieve the pressure on the Kubernetes REST client.

If you want higher performance, you can implement the NsNetworkPolicyProvider interface in pkg/controller/provider/namespace_np.go.

Signed-off-by: Duan Jiong <djduanjiong@gmail.com>
This commit is contained in:
Duan Jiong
2020-04-15 21:42:29 +08:00
parent fc373b18e3
commit d3bdcd0465
85 changed files with 4130 additions and 6254 deletions

View File

@@ -1,6 +0,0 @@
package controllerapi
// Controller exposes the Run method
type Controller interface {
Run(threadiness int, stopCh <-chan struct{}) error
}

View File

@@ -1,5 +1,5 @@
package network
// +kubebuilder:rbac:groups=network.kubesphere.io,resources=workspacenetworkpolicies;namespacenetworkpolicies,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups:core,resource=namespaces,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=network.kubesphere.io,resources=namespacenetworkpolicies,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch
package network

View File

@@ -5,173 +5,511 @@ import (
"time"
corev1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
typev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
uruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
v1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/klog/klogr"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
networkinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
networklister "kubesphere.io/kubesphere/pkg/client/listers/network/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller/network/controllerapi"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
workspacev1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
ksnetclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
nspolicy "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
workspace "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/provider"
)
const controllerAgentName = "nsnp-controller"
const (
// period for resyncing service labels into NSNPs
defaultSleepDuration = 60 * time.Second
type controller struct {
kubeClientset kubernetes.Interface
kubesphereClientset kubesphereclient.Interface
defaultThread = 5
defaultSync = "5m"
nsnpInformer networkinformer.NamespaceNetworkPolicyInformer
nsnpLister networklister.NamespaceNetworkPolicyLister
nsnpSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
nsNetworkPolicyProvider provider.NsNetworkPolicyProvider
}
// whether network isolation is enabled in the namespace
NamespaceNPAnnotationKey = "kubesphere.io/network-isolate"
NamespaceNPAnnotationEnabled = "enabled"
var (
log = klogr.New().WithName("Controller").WithValues("Component", controllerAgentName)
errCount = 0
AnnotationNPNAME = "network-isolate"
)
func NewController(kubeclientset kubernetes.Interface,
kubesphereclientset kubesphereclient.Interface,
nsnpInformer networkinformer.NamespaceNetworkPolicyInformer,
nsNetworkPolicyProvider provider.NsNetworkPolicyProvider) controllerapi.Controller {
utilruntime.Must(kubespherescheme.AddToScheme(scheme.Scheme))
log.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
ctl := &controller{
kubeClientset: kubeclientset,
kubesphereClientset: kubesphereclientset,
nsnpInformer: nsnpInformer,
nsnpLister: nsnpInformer.Lister(),
nsnpSynced: nsnpInformer.Informer().HasSynced,
nsNetworkPolicyProvider: nsNetworkPolicyProvider,
// NSNetworkPolicyController implements the Controller interface for managing KubeSphere network policies,
// converting them to k8s NetworkPolicies and syncing them to the provider.
type NSNetworkPolicyController struct {
client kubernetes.Interface
ksclient ksnetclient.NetworkV1alpha1Interface
informer nspolicy.NamespaceNetworkPolicyInformer
informerSynced cache.InformerSynced
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NamespaceNetworkPolicies"),
recorder: recorder,
}
log.Info("Setting up event handlers")
nsnpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueNSNP,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueNSNP(new)
},
DeleteFunc: ctl.enqueueNSNP,
})
return ctl
serviceInformer v1.ServiceInformer
serviceInformerSynced cache.InformerSynced
workspaceInformer workspace.WorkspaceInformer
workspaceInformerSynced cache.InformerSynced
namespaceInformer v1.NamespaceInformer
namespaceInformerSynced cache.InformerSynced
provider provider.NsNetworkPolicyProvider
nsQueue workqueue.RateLimitingInterface
nsnpQueue workqueue.RateLimitingInterface
}
func (c *controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
//init client
// Start the informer factories to begin populating the informer caches
log.V(1).Info("Starting WSNP controller")
// Wait for the caches to be synced before starting workers
log.V(2).Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.nsnpSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
func (c *NSNetworkPolicyController) convertPeer(peers []v1alpha1.NetworkPolicyPeer) ([]netv1.NetworkPolicyPeer, error) {
if len(peers) <= 0 {
return nil, nil
}
log.Info("Starting workers")
// Launch workers to process NSNP resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
rules := make([]netv1.NetworkPolicyPeer, 0)
for _, peer := range peers {
rule := netv1.NetworkPolicyPeer{}
if peer.ServiceSelector != nil {
rule.PodSelector = new(metav1.LabelSelector)
rule.NamespaceSelector = new(metav1.LabelSelector)
namespace := peer.ServiceSelector.Namespace
name := peer.ServiceSelector.Name
service, err := c.serviceInformer.Lister().Services(namespace).Get(name)
if err != nil {
return nil, err
}
if len(service.Spec.Selector) <= 0 {
return nil, fmt.Errorf("service %s/%s has no pod selector", namespace, name)
}
rule.PodSelector.MatchLabels = make(map[string]string)
for key, value := range service.Spec.Selector {
rule.PodSelector.MatchLabels[key] = value
}
rule.NamespaceSelector.MatchLabels = make(map[string]string)
rule.NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = namespace
} else if peer.NamespaceSelector != nil {
name := peer.NamespaceSelector.Name
rule.NamespaceSelector = new(metav1.LabelSelector)
rule.NamespaceSelector.MatchLabels = make(map[string]string)
rule.NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = name
} else if peer.IPBlock != nil {
rule.IPBlock = peer.IPBlock
} else {
klog.Errorf("Invalid nsnp peer %v\n", peer)
continue
}
rules = append(rules, rule)
}
return rules, nil
}
func (c *NSNetworkPolicyController) convertToK8sNP(n *v1alpha1.NamespaceNetworkPolicy) (*netv1.NetworkPolicy, error) {
np := &netv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: n.Name,
Namespace: n.Namespace,
},
Spec: netv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: make([]netv1.PolicyType, 0),
},
}
if n.Spec.Egress != nil {
np.Spec.Egress = make([]netv1.NetworkPolicyEgressRule, len(n.Spec.Egress))
for indexEgress, egress := range n.Spec.Egress {
rules, err := c.convertPeer(egress.To)
if err != nil {
return nil, err
}
np.Spec.Egress[indexEgress].To = rules
np.Spec.Egress[indexEgress].Ports = egress.Ports
}
np.Spec.PolicyTypes = append(np.Spec.PolicyTypes, netv1.PolicyTypeEgress)
}
if n.Spec.Ingress != nil {
np.Spec.Ingress = make([]netv1.NetworkPolicyIngressRule, len(n.Spec.Ingress))
for indexIngress, ingress := range n.Spec.Ingress {
rules, err := c.convertPeer(ingress.From)
if err != nil {
return nil, err
}
np.Spec.Ingress[indexIngress].From = rules
np.Spec.Ingress[indexIngress].Ports = ingress.Ports
}
np.Spec.PolicyTypes = append(np.Spec.PolicyTypes, netv1.PolicyTypeIngress)
}
return np, nil
}
func generateNSNP(workspace string, namespace string, matchWorkspace bool) *netv1.NetworkPolicy {
policy := &netv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: AnnotationNPNAME,
Namespace: namespace,
},
Spec: netv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: make([]netv1.PolicyType, 0),
Ingress: []netv1.NetworkPolicyIngressRule{{
From: []netv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{},
},
}},
}},
Egress: []netv1.NetworkPolicyEgressRule{{
To: []netv1.NetworkPolicyPeer{{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{},
},
}},
}},
},
}
policy.Spec.PolicyTypes = append(policy.Spec.PolicyTypes, netv1.PolicyTypeIngress, netv1.PolicyTypeEgress)
if matchWorkspace {
policy.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[constants.WorkspaceLabelKey] = workspace
policy.Spec.Egress[0].To[0].NamespaceSelector.MatchLabels[constants.WorkspaceLabelKey] = workspace
} else {
policy.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = namespace
policy.Spec.Egress[0].To[0].NamespaceSelector.MatchLabels[constants.NamespaceLabelKey] = namespace
}
return policy
}
func (c *NSNetworkPolicyController) nsEnqueue(ns *corev1.Namespace) {
key, err := cache.MetaNamespaceKeyFunc(ns)
if err != nil {
uruntime.HandleError(fmt.Errorf("Get namespace key %s failed", ns.Name))
return
}
klog.V(4).Infof("Enqueue namespace %s", ns.Name)
c.nsQueue.Add(key)
}
func (c *NSNetworkPolicyController) addWorkspace(newObj interface{}) {
new := newObj.(*workspacev1alpha1.Workspace)
klog.V(4).Infof("Add workspace %s", new.Name)
label := labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: new.Name})
nsList, err := c.namespaceInformer.Lister().List(label)
if err != nil {
klog.Errorf("Error while list namespace by label %s", label.String())
return
}
for _, ns := range nsList {
c.nsEnqueue(ns)
}
}
func (c *NSNetworkPolicyController) addNamespace(obj interface{}) {
ns := obj.(*corev1.Namespace)
workspaceName := ns.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
return
}
klog.V(4).Infof("Add namespace %s", ns.Name)
c.nsEnqueue(ns)
}
func isNetworkIsolateEnabled(ns *corev1.Namespace) bool {
if ns.Annotations[NamespaceNPAnnotationKey] == NamespaceNPAnnotationEnabled {
return true
}
return false
}
func hadNamespaceLabel(ns *corev1.Namespace) bool {
if ns.Annotations[constants.NamespaceLabelKey] == ns.Name {
return true
}
return false
}
func (c *NSNetworkPolicyController) syncNs(key string) error {
klog.V(4).Infof("Sync namespace %s", key)
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.Errorf("Not a valid controller key %s, %#v", key, err)
return err
}
ns, err := c.namespaceInformer.Lister().Get(name)
if err != nil {
// not found, possibly been deleted
if errors.IsNotFound(err) {
klog.V(2).Infof("Namespace %v has been deleted", key)
return nil
}
return err
}
workspaceName := ns.Labels[constants.WorkspaceLabelKey]
if workspaceName == "" {
klog.Error("Workspace name should not be empty")
return nil
}
wksp, err := c.workspaceInformer.Lister().Get(workspaceName)
if err != nil {
// Should not happen
if errors.IsNotFound(err) {
klog.V(2).Infof("Workspace %v has been deleted", workspaceName)
return nil
}
return err
}
// Some namespaces may not be labeled yet
if !hadNamespaceLabel(ns) {
ns.Labels[constants.NamespaceLabelKey] = ns.Name
_, err := c.client.CoreV1().Namespaces().Update(ns)
if err != nil {
// Just log; the label can also be added by the namespace controller
klog.Errorf("cannot label namespace %s", ns.Name)
}
}
matchWorkspace := false
shouldDelete := false
if isNetworkIsolateEnabled(ns) {
matchWorkspace = false
} else if wksp.Spec.NetworkIsolation {
matchWorkspace = true
} else {
shouldDelete = true
}
policy := generateNSNP(workspaceName, ns.Name, matchWorkspace)
if shouldDelete {
c.provider.Delete(c.provider.GetKey(AnnotationNPNAME, ns.Name))
// delete all namespace NPs when network isolation is not active
if c.ksclient.NamespaceNetworkPolicies(ns.Name).DeleteCollection(nil, typev1.ListOptions{}) != nil {
klog.Errorf("Error when deleting all NSNPs in namespace %s", ns.Name)
}
} else {
err = c.provider.Set(policy)
if err != nil {
klog.Errorf("Error while converting %#v to provider's network policy.", policy)
return err
}
}
klog.V(2).Info("Started workers")
<-stopCh
log.V(2).Info("Shutting down workers")
return nil
}
func (c *controller) enqueueNSNP(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}
func (c *controller) runWorker() {
for c.processNextWorkItem() {
func (c *NSNetworkPolicyController) nsWorker() {
for c.processNsWorkItem() {
}
}
func (c *controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
func (c *NSNetworkPolicyController) processNsWorkItem() bool {
key, quit := c.nsQueue.Get()
if quit {
return false
}
defer c.nsQueue.Done(key)
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the reconcile, passing it the namespace/name string of the
// resource to be synced.
if err := c.reconcile(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
log.Info("Successfully synced", "key", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
if err := c.syncNs(key.(string)); err != nil {
klog.Errorf("Error when syncing namespace: %s", err)
}
return true
}
func (c *NSNetworkPolicyController) nsnpEnqueue(obj interface{}) {
nsnp := obj.(*v1alpha1.NamespaceNetworkPolicy)
key, err := cache.MetaNamespaceKeyFunc(nsnp)
if err != nil {
uruntime.HandleError(fmt.Errorf("get namespace network policy key %s failed", nsnp.Name))
return
}
c.nsnpQueue.Add(key)
}
func (c *NSNetworkPolicyController) syncNSNP(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.Errorf("Not a valid controller key %s, %#v", key, err)
return err
}
ns, err := c.namespaceInformer.Lister().Get(namespace)
if err != nil {
return err
}
if !isNetworkIsolateEnabled(ns) {
klog.Infof("Delete NSNP %s when namespace isolate is inactive", key)
c.provider.Delete(c.provider.GetKey(name, namespace))
return nil
}
nsnp, err := c.informer.Lister().NamespaceNetworkPolicies(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
klog.V(4).Infof("NSNP %v has been deleted", key)
c.provider.Delete(c.provider.GetKey(name, namespace))
return nil
}
return err
}
np, err := c.convertToK8sNP(nsnp)
if err != nil {
klog.Errorf("Error while convert nsnp to k8snp: %s", err)
return err
}
err = c.provider.Set(np)
if err != nil {
return err
}
return nil
}
func (c *NSNetworkPolicyController) nsNPWorker() {
for c.processNSNPWorkItem() {
}
}
func (c *NSNetworkPolicyController) processNSNPWorkItem() bool {
key, quit := c.nsnpQueue.Get()
if quit {
return false
}
defer c.nsnpQueue.Done(key)
if err := c.syncNSNP(key.(string)); err != nil {
klog.Errorf("Error when syncing NSNP %s: %v", key, err)
}
return true
}
// NewNSNetworkPolicyController returns a controller which manages NSNP objects.
func NewNSNetworkPolicyController(
client kubernetes.Interface,
ksclient ksnetclient.NetworkV1alpha1Interface,
nsnpInformer nspolicy.NamespaceNetworkPolicyInformer,
serviceInformer v1.ServiceInformer,
workspaceInformer workspace.WorkspaceInformer,
namespaceInformer v1.NamespaceInformer,
policyProvider provider.NsNetworkPolicyProvider) *NSNetworkPolicyController {
controller := &NSNetworkPolicyController{
client: client,
ksclient: ksclient,
informer: nsnpInformer,
informerSynced: nsnpInformer.Informer().HasSynced,
serviceInformer: serviceInformer,
serviceInformerSynced: serviceInformer.Informer().HasSynced,
workspaceInformer: workspaceInformer,
workspaceInformerSynced: workspaceInformer.Informer().HasSynced,
namespaceInformer: namespaceInformer,
namespaceInformerSynced: namespaceInformer.Informer().HasSynced,
provider: policyProvider,
nsQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
nsnpQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespacenp"),
}
workspaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.addWorkspace,
UpdateFunc: func(oldObj, newObj interface{}) {
old := oldObj.(*workspacev1alpha1.Workspace)
new := newObj.(*workspacev1alpha1.Workspace)
if old.Spec.NetworkIsolation == new.Spec.NetworkIsolation {
return
}
controller.addWorkspace(newObj)
},
})
namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.addNamespace,
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
old := oldObj.(*corev1.Namespace)
new := newObj.(*corev1.Namespace)
if old.Annotations[NamespaceNPAnnotationKey] == new.Annotations[NamespaceNPAnnotationKey] {
return
}
controller.addNamespace(newObj)
},
})
nsnpInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
klog.V(4).Infof("Got ADD event for NSNP: %#v", obj)
controller.nsnpEnqueue(obj)
},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
klog.V(4).Info("Got UPDATE event for NSNP.")
klog.V(4).Infof("Old object: \n%#v\n", oldObj)
klog.V(4).Infof("New object: \n%#v\n", newObj)
controller.nsnpEnqueue(newObj)
},
DeleteFunc: func(obj interface{}) {
klog.V(4).Infof("Got DELETE event for NSNP: %#v", obj)
controller.nsnpEnqueue(obj)
},
}, defaultSleepDuration)
return controller
}
func (c *NSNetworkPolicyController) Start(stopCh <-chan struct{}) error {
return c.Run(defaultThread, defaultSync, stopCh)
}
// Run starts the controller.
func (c *NSNetworkPolicyController) Run(threadiness int, reconcilerPeriod string, stopCh <-chan struct{}) error {
defer uruntime.HandleCrash()
defer c.nsQueue.ShutDown()
defer c.nsnpQueue.ShutDown()
klog.Info("Waiting to sync with Kubernetes API (NSNP)")
if ok := cache.WaitForCacheSync(stopCh, c.informerSynced, c.serviceInformerSynced, c.workspaceInformerSynced, c.namespaceInformerSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Finished syncing with Kubernetes API (NSNP)")
// Start a number of worker threads to read from the queue. Each worker
// will pull keys off the resource cache event queue and sync them to the
// K8s datastore.
for i := 0; i < threadiness; i++ {
go wait.Until(c.nsWorker, time.Second, stopCh)
go wait.Until(c.nsNPWorker, time.Second, stopCh)
}
// Start the provider to sync K8s NetworkPolicies
go c.provider.Start(stopCh)
klog.Info("NSNP controller is now running")
<-stopCh
klog.Info("Stopping NSNP controller")
return nil
}

View File

@@ -11,8 +11,7 @@ import (
func TestNsnetworkpolicy(t *testing.T) {
klog.InitFlags(nil)
flag.Set("logtostderr", "false")
flag.Set("alsologtostderr", "false")
flag.Set("logtostderr", "true")
flag.Set("v", "4")
flag.Parse()
klog.SetOutput(GinkgoWriter)

View File

@@ -1,90 +1,285 @@
package nsnetworkpolicy
import (
"fmt"
"reflect"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
nsnplister "kubesphere.io/kubesphere/pkg/client/listers/network/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller/network/controllerapi"
corev1 "k8s.io/api/core/v1"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/yaml"
kubeinformers "k8s.io/client-go/informers"
informerv1 "k8s.io/client-go/informers/core/v1"
kubefake "k8s.io/client-go/kubernetes/fake"
netv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
wkspv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
nsnppolicyinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
workspaceinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/provider"
controllertesting "kubesphere.io/kubesphere/pkg/controller/network/testing"
)
var (
fakeControllerBuilder *controllertesting.FakeControllerBuilder
c controllerapi.Controller
stopCh chan struct{}
calicoProvider *provider.FakeCalicoNetworkProvider
nsnpLister nsnplister.NamespaceNetworkPolicyLister
c *NSNetworkPolicyController
stopCh chan struct{}
nsnpInformer nsnppolicyinformer.NamespaceNetworkPolicyInformer
serviceInformer informerv1.ServiceInformer
workspaceInformer workspaceinformer.WorkspaceInformer
namespaceInformer informerv1.NamespaceInformer
alwaysReady = func() bool { return true }
)
const (
workspaceNP = `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: networkisolate
namespace: %s
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
%s: %s
egress:
- to:
- namespaceSelector:
matchLabels:
%s: %s
policyTypes:
- Ingress
- Egress`
serviceTmp = `
apiVersion: v1
kind: Service
metadata:
name: myservice
namespace: testns
spec:
selector:
app: mylbapp
ports:
- protocol: TCP
port: 80
targetPort: 9376
`
workspaceTmp = `
apiVersion: tenant.kubesphere.io/v1alpha1
kind: Workspace
metadata:
annotations:
kubesphere.io/creator: admin
name: testworkspace
spec:
manager: admin
networkIsolation: true
status: {}
`
nsTmp = `
apiVersion: v1
kind: Namespace
metadata:
labels:
kubesphere.io/workspace: testworkspace
name: testns
spec:
finalizers:
- kubernetes
`
)
func StringToObject(data string, obj interface{}) error {
reader := strings.NewReader(data)
return yaml.NewYAMLOrJSONDecoder(reader, 10).Decode(obj)
}
var _ = Describe("Nsnetworkpolicy", func() {
BeforeEach(func() {
fakeControllerBuilder = controllertesting.NewFakeControllerBuilder()
stopCh = make(chan struct{})
informer, _ := fakeControllerBuilder.NewControllerInformer()
calicoProvider = provider.NewFakeCalicoNetworkProvider()
c = NewController(fakeControllerBuilder.KubeClient, fakeControllerBuilder.KsClient, informer.Network().V1alpha1().NamespaceNetworkPolicies(), calicoProvider)
go informer.Network().V1alpha1().NamespaceNetworkPolicies().Informer().Run(stopCh)
originalController := c.(*controller)
originalController.recorder = &record.FakeRecorder{}
go c.Run(1, stopCh)
nsnpLister = informer.Network().V1alpha1().NamespaceNetworkPolicies().Lister()
calicoProvider := provider.NewFakeNetworkProvider()
kubeClient := kubefake.NewSimpleClientset()
ksClient := ksfake.NewSimpleClientset()
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
ksInformer := ksinformers.NewSharedInformerFactory(ksClient, 0)
nsnpInformer := ksInformer.Network().V1alpha1().NamespaceNetworkPolicies()
serviceInformer := kubeInformer.Core().V1().Services()
workspaceInformer := ksInformer.Tenant().V1alpha1().Workspaces()
namespaceInformer := kubeInformer.Core().V1().Namespaces()
c = NewNSNetworkPolicyController(kubeClient, ksClient.NetworkV1alpha1(), nsnpInformer, serviceInformer, workspaceInformer, namespaceInformer, calicoProvider)
serviceObj := &corev1.Service{}
Expect(StringToObject(serviceTmp, serviceObj)).ShouldNot(HaveOccurred())
Expect(serviceInformer.Informer().GetIndexer().Add(serviceObj)).ShouldNot(HaveOccurred())
nsObj := &corev1.Namespace{}
Expect(StringToObject(nsTmp, nsObj)).ShouldNot(HaveOccurred())
namespaceInformer.Informer().GetIndexer().Add(nsObj)
workspaceObj := &wkspv1alpha1.Workspace{}
Expect(StringToObject(workspaceTmp, workspaceObj)).ShouldNot(HaveOccurred())
workspaceInformer.Informer().GetIndexer().Add(workspaceObj)
c.namespaceInformerSynced = alwaysReady
c.serviceInformerSynced = alwaysReady
c.workspaceInformerSynced = alwaysReady
c.informerSynced = alwaysReady
go c.Start(stopCh)
})
It("Should create a new calico object", func() {
objSrt := `{
"apiVersion": "network.kubesphere.io/v1alpha1",
"kind": "NetworkPolicy",
"metadata": {
"name": "allow-tcp-6379",
"namespace": "production"
},
"spec": {
"selector": "color == 'red'",
"ingress": [
{
"action": "Allow",
"protocol": "TCP",
"source": {
"selector": "color == 'blue'"
},
"destination": {
"ports": [
6379
]
}
}
]
}
}`
obj := &v1alpha1.NamespaceNetworkPolicy{}
Expect(controllertesting.StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
_, err := fakeControllerBuilder.KsClient.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Create(obj)
It("Should create ns networkisolate np correctly in workspace", func() {
objSrt := fmt.Sprintf(workspaceNP, "testns", constants.WorkspaceLabelKey, "testworkspace", constants.WorkspaceLabelKey, "testworkspace")
obj := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy := generateNSNP("testworkspace", "testns", true)
Expect(reflect.DeepEqual(obj.Spec, policy.Spec)).To(BeTrue())
})
It("Should create ns networkisolate np correctly in ns", func() {
objSrt := fmt.Sprintf(workspaceNP, "testns", constants.NamespaceLabelKey, "testns", constants.NamespaceLabelKey, "testns")
obj := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy := generateNSNP("testworkspace", "testns", false)
Expect(reflect.DeepEqual(obj.Spec, policy.Spec)).To(BeTrue())
})
It("test func convertToK8sNP", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: namespaceIPblockNP
namespace: testns
spec:
ingress:
- from:
- ipBlock:
cidr: 172.0.0.1/16
ports:
- protocol: TCP
port: 80
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
policy, err := c.convertToK8sNP(obj)
objSrt = `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: IPblockNP
namespace: testns
spec:
ingress:
- from:
- ipBlock:
cidr: 172.0.0.1/16
ports:
- protocol: TCP
port: 80
policyTypes:
- Ingress
`
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
Expect(err).ShouldNot(HaveOccurred())
Eventually(func() bool {
exist, _ := calicoProvider.CheckExist(obj)
return exist
}).Should(BeTrue())
obj, _ = fakeControllerBuilder.KsClient.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Get(obj.Name, metav1.GetOptions{})
Expect(obj.Finalizers).To(HaveLen(1))
// TestUpdate
newStr := "color == 'green'"
obj.Spec.Selector = newStr
_, err = fakeControllerBuilder.KsClient.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Update(obj)
Expect(reflect.DeepEqual(obj2.Spec, policy.Spec)).To(BeTrue())
})
It("test func convertToK8sNP with namespace", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: testnamespace
namespace: testns2
spec:
ingress:
- from:
- namespace:
name: testns
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
np, err := c.convertToK8sNP(obj)
Expect(err).ShouldNot(HaveOccurred())
Eventually(func() string {
o, err := calicoProvider.Get(obj)
if err != nil {
return err.Error()
}
n := o.(*v1alpha1.NamespaceNetworkPolicy)
return n.Spec.Selector
}).Should(Equal(newStr))
// TestDelete
Expect(fakeControllerBuilder.KsClient.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Delete(obj.Name, &metav1.DeleteOptions{})).ShouldNot(HaveOccurred())
objTmp := `
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: testnamespace
namespace: testns2
spec:
podSelector: {}
ingress:
- from:
- namespaceSelector:
matchLabels:
%s: %s
policyTypes:
- Ingress`
objSrt = fmt.Sprintf(objTmp, constants.NamespaceLabelKey, "testns")
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
Expect(reflect.DeepEqual(np.Spec, obj2.Spec)).To(BeTrue())
})
It("test func convertToK8sNP with service", func() {
objSrt := `
apiVersion: network.kubesphere.io/v1alpha1
kind: NamespaceNetworkPolicy
metadata:
name: testnamespace
namespace: testns2
spec:
ingress:
- from:
- service:
name: myservice
namespace: testns
`
obj := &netv1alpha1.NamespaceNetworkPolicy{}
Expect(StringToObject(objSrt, obj)).ShouldNot(HaveOccurred())
np, err := c.convertToK8sNP(obj)
Expect(err).ShouldNot(HaveOccurred())
objSrt = `
apiVersion: "networking.k8s.io/v1"
kind: NetworkPolicy
metadata:
name: networkisolate
namespace: testns
spec:
podSelector: {}
ingress:
- from:
- podSelector:
matchLabels:
app: mylbapp
namespaceSelector:
matchLabels:
kubesphere.io/namespace: testns
policyTypes:
- Ingress
`
obj2 := &netv1.NetworkPolicy{}
Expect(StringToObject(objSrt, obj2)).ShouldNot(HaveOccurred())
Expect(reflect.DeepEqual(np.Spec, obj2.Spec)).To(BeTrue())
})
AfterEach(func() {

View File

@@ -1,119 +0,0 @@
package nsnetworkpolicy
import (
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller/network/utils"
)
const (
controllerFinalizier = "nsnp.finalizers.networking.kubesphere.io"
)
var clog logr.Logger
func (c *controller) reconcile(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
clog = log.WithValues("name", name, "namespace", namespace)
clog.V(1).Info("---------Begin to reconcile--------")
defer clog.V(1).Info("---------Reconcile done--------")
obj, err := c.nsnpLister.NamespaceNetworkPolicies(namespace).Get(name)
if err != nil {
if errors.IsNotFound(err) {
clog.V(2).Info("Object is removed")
return nil
}
clog.Error(err, "Failed to get resource")
return err
}
stop, err := c.addOrRemoveFinalizer(obj)
if err != nil {
return err
}
if stop {
return nil
}
clog.V(2).Info("Check if we need a create or update")
ok, err := c.nsNetworkPolicyProvider.CheckExist(obj)
if err != nil {
clog.Error(err, "Failed to check exist of network policy")
return err
}
if !ok {
clog.V(1).Info("Create a new object in backend")
err = c.nsNetworkPolicyProvider.Add(obj)
if err != nil {
clog.Error(err, "Failed to create np")
return err
}
return nil
}
needUpdate, err := c.nsNetworkPolicyProvider.NeedUpdate(obj)
if err != nil {
clog.Error(err, "Failed to check if object need a update")
return err
}
if needUpdate {
clog.V(1).Info("Update object in backend")
err = c.nsNetworkPolicyProvider.Update(obj)
if err != nil {
clog.Error(err, "Failed to update object")
return err
}
}
return nil
}
func (c *controller) addOrRemoveFinalizer(obj *v1alpha1.NamespaceNetworkPolicy) (bool, error) {
if obj.ObjectMeta.DeletionTimestamp.IsZero() {
if !utils.ContainsString(obj.ObjectMeta.Finalizers, controllerFinalizier) {
clog.V(2).Info("Detect no finalizer")
obj.ObjectMeta.Finalizers = append(obj.ObjectMeta.Finalizers, controllerFinalizier)
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err := c.kubesphereClientset.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Update(obj)
return err
})
if err != nil {
clog.Error(err, "Failed to add finalizer")
return false, err
}
return false, nil
}
} else {
// The object is being deleted
if utils.ContainsString(obj.ObjectMeta.Finalizers, controllerFinalizier) {
// our finalizer is present, so lets handle any external dependency
if err := c.deleteProviderNSNP(obj); err != nil {
// if fail to delete the external dependency here, return with error
// so that it can be retried
return false, err
}
clog.V(2).Info("Removing finalizer")
// remove our finalizer from the list and update it.
obj.ObjectMeta.Finalizers = utils.RemoveString(obj.ObjectMeta.Finalizers, controllerFinalizier)
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err := c.kubesphereClientset.NetworkV1alpha1().NamespaceNetworkPolicies(obj.Namespace).Update(obj)
return err
})
if err != nil {
clog.Error(err, "Failed to remove finalizer")
return false, err
}
return true, nil
}
}
return false, nil
}
// deleteProviderNSNP delete network policy in the backend
func (c *controller) deleteProviderNSNP(obj *v1alpha1.NamespaceNetworkPolicy) error {
clog.V(2).Info("Deleting backend network policy")
return c.nsNetworkPolicyProvider.Delete(obj)
}

View File

@@ -0,0 +1,38 @@
package nsnetworkpolicy
import (
"context"
"fmt"
"net/http"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// +kubebuilder:webhook:path=/validate-service-nsnp-kubesphere-io-v1alpha1-network,name=validate-v1-service,mutating=false,failurePolicy=fail,groups="",resources=services,verbs=create;update,versions=v1
// ServiceValidator validates services
type ServiceValidator struct {
decoder *admission.Decoder
}
// Service must have a selector, because NSNP will use it
func (v *ServiceValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
service := &corev1.Service{}
err := v.decoder.Decode(req, service)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if service.Spec.Selector == nil {
return admission.Denied(fmt.Sprintf("service %s/%s has no selector", service.Namespace, service.Name))
}
return admission.Allowed("")
}
func (a *ServiceValidator) InjectDecoder(d *admission.Decoder) error {
a.decoder = d
return nil
}

View File

@@ -1,3 +0,0 @@
package provider
// +kubebuilder:rbac:groups="crd.projectcalico.org",resources=globalfelixconfigs;felixconfigurations;ippools;ipamblocks;globalnetworkpolicies;globalnetworksets;networkpolicies;networksets;clusterinformations;hostendpoints,verbs=get;list;watch;create;patch;update;delete

View File

@@ -0,0 +1,49 @@
package provider
import (
"fmt"
"github.com/projectcalico/kube-controllers/pkg/converter"
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
constants "github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
v1 "k8s.io/api/networking/v1"
)
func NewFakeNetworkProvider() *FakeNetworkProvider {
f := new(FakeNetworkProvider)
f.NSNPData = make(map[string]*api.NetworkPolicy)
f.policyConverter = converter.NewPolicyConverter()
return f
}
type FakeNetworkProvider struct {
NSNPData map[string]*api.NetworkPolicy
policyConverter converter.Converter
}
func (f *FakeNetworkProvider) Delete(key string) {
delete(f.NSNPData, key)
}
func (f *FakeNetworkProvider) Start(stopCh <-chan struct{}) {
}
func (f *FakeNetworkProvider) Set(np *v1.NetworkPolicy) error {
policy, err := f.policyConverter.Convert(np)
if err != nil {
return err
}
// Add to cache.
k := f.policyConverter.GetKey(policy)
tmp := policy.(api.NetworkPolicy)
f.NSNPData[k] = &tmp
return nil
}
func (f *FakeNetworkProvider) GetKey(name, nsname string) string {
policyName := constants.K8sNetworkPolicyNamePrefix + name
return fmt.Sprintf("%s/%s", nsname, policyName)
}

View File

@@ -1,66 +0,0 @@
package provider
import (
"reflect"
"github.com/projectcalico/libcalico-go/lib/errors"
"k8s.io/client-go/tools/cache"
api "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
)
func NewFakeCalicoNetworkProvider() *FakeCalicoNetworkProvider {
f := new(FakeCalicoNetworkProvider)
f.NSNPData = make(map[string]*api.NamespaceNetworkPolicy)
return f
}
type FakeCalicoNetworkProvider struct {
NSNPData map[string]*api.NamespaceNetworkPolicy
}
func (f *FakeCalicoNetworkProvider) Get(o *api.NamespaceNetworkPolicy) (interface{}, error) {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
obj, ok := f.NSNPData[namespacename]
if !ok {
return nil, errors.ErrorResourceDoesNotExist{}
}
return obj, nil
}
func (f *FakeCalicoNetworkProvider) Add(o *api.NamespaceNetworkPolicy) error {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
if _, ok := f.NSNPData[namespacename]; ok {
return errors.ErrorResourceAlreadyExists{}
}
f.NSNPData[namespacename] = o
return nil
}
func (f *FakeCalicoNetworkProvider) CheckExist(o *api.NamespaceNetworkPolicy) (bool, error) {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
if _, ok := f.NSNPData[namespacename]; ok {
return true, nil
}
return false, nil
}
func (f *FakeCalicoNetworkProvider) NeedUpdate(o *api.NamespaceNetworkPolicy) (bool, error) {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
store := f.NSNPData[namespacename]
if !reflect.DeepEqual(store, o) {
return true, nil
}
return false, nil
}
func (f *FakeCalicoNetworkProvider) Update(o *api.NamespaceNetworkPolicy) error {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
f.NSNPData[namespacename] = o
return nil
}
func (f *FakeCalicoNetworkProvider) Delete(o *api.NamespaceNetworkPolicy) error {
namespacename, _ := cache.MetaNamespaceKeyFunc(o)
delete(f.NSNPData, namespacename)
return nil
}

View File

@@ -1 +0,0 @@
package provider

View File

@@ -1,35 +1,11 @@
package provider
import (
k8snetworkinformer "k8s.io/client-go/informers/networking/v1"
k8snetworklister "k8s.io/client-go/listers/networking/v1"
api "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
)
import netv1 "k8s.io/api/networking/v1"
// NsNetworkPolicyProvider is an interface that lets different CNIs implement our API
type NsNetworkPolicyProvider interface {
Add(*api.NamespaceNetworkPolicy) error
CheckExist(*api.NamespaceNetworkPolicy) (bool, error)
NeedUpdate(*api.NamespaceNetworkPolicy) (bool, error)
Update(*api.NamespaceNetworkPolicy) error
Delete(*api.NamespaceNetworkPolicy) error
Get(*api.NamespaceNetworkPolicy) (interface{}, error)
}
// TODO: support non-Calico CNIs
type k8sNetworkProvider struct {
networkPolicyInformer k8snetworkinformer.NetworkPolicyInformer
networkPolicyLister k8snetworklister.NetworkPolicyLister
}
func (k *k8sNetworkProvider) Add(o *api.NamespaceNetworkPolicy) error {
return nil
}
func (k *k8sNetworkProvider) CheckExist(o *api.NamespaceNetworkPolicy) (bool, error) {
return false, nil
}
func (k *k8sNetworkProvider) Delete(o *api.NamespaceNetworkPolicy) error {
return nil
Delete(key string)
Set(policy *netv1.NetworkPolicy) error
Start(stopCh <-chan struct{})
GetKey(name, nsname string) string
}

View File

@@ -1,144 +0,0 @@
package provider
import (
"context"
"encoding/json"
"reflect"
"time"
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/options"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/klogr"
api "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
)
var log = klogr.New().WithName("calico-client")
var defaultBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
type calicoNetworkProvider struct {
np clientv3.NetworkPolicyInterface
}
func NewCalicoNetworkProvider(np clientv3.NetworkPolicyInterface) NsNetworkPolicyProvider {
return &calicoNetworkProvider{
np: np,
}
}
func convertSpec(n *api.NamespaceNetworkPolicySpec) *v3.NetworkPolicySpec {
bytes, err := json.Marshal(&n)
if err != nil {
panic(err)
}
m := new(v3.NetworkPolicySpec)
err = json.Unmarshal(bytes, m)
if err != nil {
panic(err)
}
return m
}
// ConvertAPIToCalico converts our API object to a Calico API object
func ConvertAPIToCalico(n *api.NamespaceNetworkPolicy) *v3.NetworkPolicy {
output := v3.NewNetworkPolicy()
//Object Metadata
output.ObjectMeta.Name = n.Name
output.Namespace = n.Namespace
output.Annotations = n.Annotations
output.Labels = n.Labels
//spec
output.Spec = *(convertSpec(&n.Spec))
return output
}
func (k *calicoNetworkProvider) Get(o *api.NamespaceNetworkPolicy) (interface{}, error) {
return k.np.Get(context.TODO(), o.Namespace, o.Name, options.GetOptions{})
}
func (k *calicoNetworkProvider) Add(o *api.NamespaceNetworkPolicy) error {
log.V(3).Info("Creating network policy", "name", o.Name, "namespace", o.Namespace)
obj := ConvertAPIToCalico(o)
log.V(4).Info("Show object spec detail", "name", o.Name, "namespace", o.Namespace, "Spec", obj.Spec)
_, err := k.np.Create(context.TODO(), obj, options.SetOptions{})
return err
}
func (k *calicoNetworkProvider) CheckExist(o *api.NamespaceNetworkPolicy) (bool, error) {
log.V(3).Info("Checking whether network policy exists", "name", o.Name, "namespace", o.Namespace)
out, err := k.np.Get(context.Background(), o.Namespace, o.Name, options.GetOptions{})
if err != nil {
if _, ok := err.(errors.ErrorResourceDoesNotExist); ok {
return false, nil
}
return false, err
}
if out != nil {
return true, nil
}
return false, nil
}
func (k *calicoNetworkProvider) Delete(o *api.NamespaceNetworkPolicy) error {
log.V(3).Info("Deleting network policy", "name", o.Name, "namespace", o.Namespace)
_, err := k.np.Delete(context.Background(), o.Namespace, o.Name, options.DeleteOptions{})
return err
}
func (k *calicoNetworkProvider) NeedUpdate(o *api.NamespaceNetworkPolicy) (bool, error) {
store, err := k.np.Get(context.Background(), o.Namespace, o.Name, options.GetOptions{})
if err != nil {
log.Error(err, "Failed to get resource", "name", o.Name, "namespace", o.Namespace)
return false, err
}
expected := ConvertAPIToCalico(o)
log.V(4).Info("Comparing Spec", "store", store.Spec, "current", expected.Spec)
if !reflect.DeepEqual(store.Spec, expected.Spec) {
return true, nil
}
return false, nil
}
func (k *calicoNetworkProvider) Update(o *api.NamespaceNetworkPolicy) error {
log.V(3).Info("Updating network policy", "name", o.Name, "namespace", o.Namespace)
updateObject, err := k.Get(o)
if err != nil {
log.Error(err, "Failed to get resource in store")
return err
}
up := updateObject.(*v3.NetworkPolicy)
up.Spec = *convertSpec(&o.Spec)
err = RetryOnConflict(defaultBackoff, func() error {
_, err := k.np.Update(context.Background(), up, options.SetOptions{})
return err
})
if err != nil {
log.Error(err, "Failed to update resource", "name", o.Name, "namespace", o.Namespace)
}
return err
}
// RetryOnConflict is the same as the function in client-go, but matches the Calico conflict error instead
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
var lastConflictErr error
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn()
if err == nil {
return true, nil
}
if _, ok := err.(errors.ErrorResourceUpdateConflict); ok {
lastConflictErr = err
return false, nil
}
return false, err
})
if err == wait.ErrWaitTimeout {
err = lastConflictErr
}
return err
}

View File

@@ -0,0 +1,250 @@
package provider
import (
"context"
"fmt"
"reflect"
"strings"
"sync"
"time"
rcache "github.com/projectcalico/kube-controllers/pkg/cache"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
uruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informerv1 "k8s.io/client-go/informers/networking/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
const (
defaultSyncTime = 5 * time.Minute
)
func (c *k8sPolicyController) GetKey(name, nsname string) string {
return fmt.Sprintf("%s/%s", nsname, name)
}
func getkey(key string) (string, string) {
strs := strings.Split(key, "/")
return strs[0], strs[1]
}
// k8sPolicyController implements the NsNetworkPolicyProvider interface for managing Kubernetes network
// policies and syncing them to the k8s datastore as NetworkPolicies.
type k8sPolicyController struct {
client kubernetes.Interface
informer informerv1.NetworkPolicyInformer
ctx context.Context
resourceCache rcache.ResourceCache
hasSynced cache.InformerSynced
}
func (c *k8sPolicyController) Start(stopCh <-chan struct{}) {
c.run(5, "5m", stopCh)
}
func (c *k8sPolicyController) Set(np *netv1.NetworkPolicy) error {
klog.V(4).Infof("Set NetworkPolicy %s/%s %+v", np.Namespace, np.Name, np)
// Add to cache.
k := c.GetKey(np.Name, np.Namespace)
c.resourceCache.Set(k, *np)
return nil
}
func (c *k8sPolicyController) Delete(key string) {
klog.V(4).Infof("Delete NetworkPolicy %s", key)
c.resourceCache.Delete(key)
}
// Run starts the controller.
func (c *k8sPolicyController) run(threadiness int, reconcilerPeriod string, stopCh <-chan struct{}) {
defer uruntime.HandleCrash()
// Let the workers stop when we are done
workqueue := c.resourceCache.GetQueue()
defer workqueue.ShutDown()
// Wait until we are in sync with the Kubernetes API before starting the
// resource cache.
klog.Info("Waiting to sync with Kubernetes API (NetworkPolicy)")
if ok := cache.WaitForCacheSync(stopCh, c.hasSynced); !ok {
uruntime.HandleError(fmt.Errorf("failed to wait for caches to sync"))
return
}
klog.Infof("Finished syncing with Kubernetes API (NetworkPolicy)")
// Start the resource cache - this will trigger the queueing of any keys
// that are out of sync onto the resource cache event queue.
c.resourceCache.Run(reconcilerPeriod)
// Start a number of worker threads to read from the queue. Each worker
// will pull keys off the resource cache event queue and sync them to the
// k8s datastore.
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("NetworkPolicy controller is now running")
<-stopCh
klog.Info("Stopping NetworkPolicy controller")
}
func (c *k8sPolicyController) runWorker() {
for c.processNextItem() {
}
}
// processNextItem waits for an event on the output queue from the resource cache and syncs
// any received keys to the datastore.
func (c *k8sPolicyController) processNextItem() bool {
// Wait until there is a new item in the work queue.
workqueue := c.resourceCache.GetQueue()
key, quit := workqueue.Get()
if quit {
return false
}
// Sync the object to the k8s datastore.
if err := c.syncToDatastore(key.(string)); err != nil {
c.handleErr(err, key.(string))
}
// Indicate that we're done processing this key, allowing for safe parallel processing such that
// two objects with the same key are never processed in parallel.
workqueue.Done(key)
return true
}
// syncToDatastore syncs the given update to the k8s datastore. The provided key can be used to
// find the corresponding resource within the resource cache. If the resource for the provided key
// exists in the cache, then the value should be written to the datastore. If it does not exist
// in the cache, then it should be deleted from the datastore.
func (c *k8sPolicyController) syncToDatastore(key string) error {
// Check if it exists in the controller's cache.
obj, exists := c.resourceCache.Get(key)
if !exists {
// The object no longer exists - delete from the datastore.
klog.Infof("Deleting NetworkPolicy %s from k8s datastore", key)
ns, name := getkey(key)
err := c.client.NetworkingV1().NetworkPolicies(ns).Delete(name, nil)
if errors.IsNotFound(err) {
return nil
}
return err
} else {
// The object exists - update the datastore to reflect.
klog.Infof("Create/Update NetworkPolicy %s in k8s datastore", key)
p := obj.(netv1.NetworkPolicy)
// Lookup to see if this object already exists in the datastore.
gp, err := c.informer.Lister().NetworkPolicies(p.Namespace).Get(p.Name)
if err != nil {
if !errors.IsNotFound(err) {
klog.Warningf("Failed to get NetworkPolicy %s from datastore", key)
return err
}
// Doesn't exist - create it.
_, err := c.client.NetworkingV1().NetworkPolicies(p.Namespace).Create(&p)
if err != nil {
klog.Warningf("Failed to create NetworkPolicy %s", key)
return err
}
klog.Infof("Successfully created NetworkPolicy %s", key)
return nil
}
klog.V(4).Infof("New NetworkPolicy %s/%s %+v\n", p.Namespace, p.Name, p.Spec)
klog.V(4).Infof("Old NetworkPolicy %s/%s %+v\n", gp.Namespace, gp.Name, gp.Spec)
// The policy already exists, update it and write it back to the datastore.
gp.Spec = p.Spec
_, err = c.client.NetworkingV1().NetworkPolicies(p.Namespace).Update(gp)
if err != nil {
klog.Warningf("Failed to update NetworkPolicy %s", key)
return err
}
klog.Infof("Successfully updated NetworkPolicy %s", key)
return nil
}
}
// handleErr handles errors which occur while processing a key received from the resource cache.
// For a given error, we will re-queue the key in order to retry the datastore sync up to 5 times,
// at which point the update is dropped.
func (c *k8sPolicyController) handleErr(err error, key string) {
workqueue := c.resourceCache.GetQueue()
if err == nil {
// Forget about the #AddRateLimited history of the key on every successful synchronization.
// This ensures that future processing of updates for this key is not delayed because of
// an outdated error history.
workqueue.Forget(key)
return
}
// This controller retries 5 times if something goes wrong. After that, it stops trying.
if workqueue.NumRequeues(key) < 5 {
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
klog.Errorf("Error syncing NetworkPolicy %v: %v", key, err)
workqueue.AddRateLimited(key)
return
}
workqueue.Forget(key)
// Report to an external entity that, even after several retries, we could not successfully process this key
uruntime.HandleError(err)
klog.Errorf("Dropping NetworkPolicy %q out of the queue: %v", key, err)
}
// NewNsNetworkPolicyProvider returns a provider that syncs K8s NetworkPolicies
func NewNsNetworkPolicyProvider(client kubernetes.Interface, npInformer informerv1.NetworkPolicyInformer) (NsNetworkPolicyProvider, error) {
var once sync.Once
c := &k8sPolicyController{
client: client,
informer: npInformer,
ctx: context.Background(),
hasSynced: npInformer.Informer().HasSynced,
}
// Function returns map of policyName:policy stored by policy controller
// in datastore.
listFunc := func() (map[string]interface{}, error) {
// Wait for the cache to be populated by the NSNP controller first; otherwise
// NetworkPolicies would be deleted by mistake
once.Do(func() {
time.Sleep(defaultSyncTime)
})
// Get all policies from datastore
// TODO: filter out NPs that do not belong to KubeSphere
policies, err := npInformer.Lister().List(labels.Everything())
if err != nil {
return nil, err
}
// Filter in only objects that are written by policy controller.
m := make(map[string]interface{})
for _, policy := range policies {
policy.ObjectMeta = metav1.ObjectMeta{Name: policy.Name, Namespace: policy.Namespace}
k := c.GetKey(policy.Name, policy.Namespace)
m[k] = *policy
}
klog.Infof("Found %d policies in k8s datastore:", len(m))
return m, nil
}
cacheArgs := rcache.ResourceCacheArgs{
ListFunc: listFunc,
ObjectType: reflect.TypeOf(netv1.NetworkPolicy{}),
}
c.resourceCache = rcache.NewResourceCache(cacheArgs)
return c, nil
}

View File

@@ -1,80 +0,0 @@
package runoption
import (
"time"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/clientv3"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
ksinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy"
"kubesphere.io/kubesphere/pkg/controller/network/provider"
)
const (
certPath = "/calicocerts"
KubernetesDataStore = "k8s"
EtcdDataStore = "etcd"
)
type RunOption struct {
ProviderName string
DataStoreType string
EtcdEndpoints string
AllowInsecureEtcd bool
}
func (r RunOption) Run() error {
klog.V(1).Info("Check config")
if err := r.check(); err != nil {
return err
}
klog.V(1).Info("Preparing kubernetes client")
config, err := rest.InClusterConfig()
if err != nil {
panic(err.Error())
}
// creates the clientset
k8sClientset := kubernetes.NewForConfigOrDie(config)
ksClientset := versioned.NewForConfigOrDie(config)
informer := ksinformer.NewSharedInformerFactory(ksClientset, time.Minute*10)
klog.V(1).Info("Kubernetes client initialized successfully")
var npProvider provider.NsNetworkPolicyProvider
if r.ProviderName == "calico" {
klog.V(1).Info("Preparing calico client")
config := apiconfig.NewCalicoAPIConfig()
config.Spec.EtcdEndpoints = r.EtcdEndpoints
if !r.AllowInsecureEtcd {
config.Spec.EtcdKeyFile = certPath + "/etcd-key"
config.Spec.EtcdCertFile = certPath + "/etcd-cert"
config.Spec.EtcdCACertFile = certPath + "/etcd-ca"
}
if r.DataStoreType == KubernetesDataStore {
config.Spec.DatastoreType = apiconfig.Kubernetes
} else {
config.Spec.DatastoreType = apiconfig.EtcdV3
}
client, err := clientv3.New(*config)
if err != nil {
klog.Fatal("Failed to initialize calico client", err)
}
npProvider = provider.NewCalicoNetworkProvider(client.NetworkPolicies())
klog.V(1).Info("Calico client initialized successfully")
}
// TODO: support non-Calico CNIs
c := nsnetworkpolicy.NewController(k8sClientset, ksClientset, informer.Network().V1alpha1().NamespaceNetworkPolicies(), npProvider)
stop := make(chan struct{})
klog.V(1).Infof("Starting controller")
go informer.Network().V1alpha1().NamespaceNetworkPolicies().Informer().Run(stop)
return c.Run(1, stop)
}
func (r RunOption) check() error {
return nil
}

View File

@@ -1,38 +0,0 @@
package testing
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)
var (
AlwaysReady = func() bool { return true }
ResyncPeriodFunc = func() time.Duration { return 1 * time.Second }
)
type FakeControllerBuilder struct {
KsClient *fake.Clientset
KubeClient *k8sfake.Clientset
Kubeobjects []runtime.Object
CRDObjects []runtime.Object
}
func NewFakeControllerBuilder() *FakeControllerBuilder {
return &FakeControllerBuilder{
Kubeobjects: make([]runtime.Object, 0),
CRDObjects: make([]runtime.Object, 0),
}
}
func (f *FakeControllerBuilder) NewControllerInformer() (informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
f.KsClient = fake.NewSimpleClientset(f.CRDObjects...)
f.KubeClient = k8sfake.NewSimpleClientset(f.Kubeobjects...)
i := informers.NewSharedInformerFactory(f.KsClient, ResyncPeriodFunc())
k8sI := kubeinformers.NewSharedInformerFactory(f.KubeClient, ResyncPeriodFunc())
return i, k8sI
}

View File

@@ -1,12 +0,0 @@
package testing
import (
"strings"
"k8s.io/apimachinery/pkg/util/yaml"
)
func StringToObject(data string, obj interface{}) error {
reader := strings.NewReader(data)
return yaml.NewYAMLOrJSONDecoder(reader, 10).Decode(obj)
}

View File

@@ -1,22 +0,0 @@
package utils
// ContainsString reports whether s is in slice
func ContainsString(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
// RemoveString removes s from slice if present
func RemoveString(slice []string, s string) (result []string) {
for _, item := range slice {
if item == s {
continue
}
result = append(result, item)
}
return
}

View File

@@ -1,280 +0,0 @@
package wsnetworkpolicy
import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
k8snetwork "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
corev1informer "k8s.io/client-go/informers/core/v1"
k8snetworkinformer "k8s.io/client-go/informers/networking/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corev1lister "k8s.io/client-go/listers/core/v1"
k8snetworklister "k8s.io/client-go/listers/networking/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/klog/klogr"
workspaceapi "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
kubespherescheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
networkinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
workspaceinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
networklister "kubesphere.io/kubesphere/pkg/client/listers/network/v1alpha1"
workspacelister "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller/network/controllerapi"
)
const controllerAgentName = "wsnp-controller"
var (
log = klogr.New().WithName("Controller").WithValues(controllerAgentName)
errCount = 0
)
type controller struct {
kubeClientset kubernetes.Interface
kubesphereClientset kubesphereclient.Interface
wsnpInformer networkinformer.WorkspaceNetworkPolicyInformer
wsnpLister networklister.WorkspaceNetworkPolicyLister
wsnpSynced cache.InformerSynced
networkPolicyInformer k8snetworkinformer.NetworkPolicyInformer
networkPolicyLister k8snetworklister.NetworkPolicyLister
networkPolicySynced cache.InformerSynced
namespaceLister corev1lister.NamespaceLister
namespaceInformer corev1informer.NamespaceInformer
namespaceSynced cache.InformerSynced
workspaceLister workspacelister.WorkspaceLister
workspaceInformer workspaceinformer.WorkspaceInformer
workspaceSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
}
func NewController(kubeclientset kubernetes.Interface,
kubesphereclientset kubesphereclient.Interface,
wsnpInformer networkinformer.WorkspaceNetworkPolicyInformer,
networkPolicyInformer k8snetworkinformer.NetworkPolicyInformer,
namespaceInformer corev1informer.NamespaceInformer,
workspaceInformer workspaceinformer.WorkspaceInformer) controllerapi.Controller {
utilruntime.Must(kubespherescheme.AddToScheme(scheme.Scheme))
log.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
ctl := &controller{
kubeClientset: kubeclientset,
kubesphereClientset: kubesphereclientset,
wsnpInformer: wsnpInformer,
wsnpLister: wsnpInformer.Lister(),
wsnpSynced: wsnpInformer.Informer().HasSynced,
networkPolicyInformer: networkPolicyInformer,
networkPolicyLister: networkPolicyInformer.Lister(),
networkPolicySynced: networkPolicyInformer.Informer().HasSynced,
namespaceInformer: namespaceInformer,
namespaceLister: namespaceInformer.Lister(),
namespaceSynced: namespaceInformer.Informer().HasSynced,
workspaceInformer: workspaceInformer,
workspaceLister: workspaceInformer.Lister(),
workspaceSynced: workspaceInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkspaceNetworkPolicies"),
recorder: recorder,
}
log.Info("Setting up event handlers")
wsnpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.enqueueWSNP,
UpdateFunc: func(old, new interface{}) {
ctl.enqueueWSNP(new)
},
})
networkPolicyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.handleNP,
UpdateFunc: func(old, new interface{}) {
newNP := new.(*k8snetwork.NetworkPolicy)
oldNP := old.(*k8snetwork.NetworkPolicy)
if newNP.ResourceVersion == oldNP.ResourceVersion {
return
}
ctl.handleNP(new)
},
DeleteFunc: ctl.handleNP,
})
workspaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ctl.handleWS,
UpdateFunc: func(old, new interface{}) {
newWS := new.(*workspaceapi.Workspace)
oldWS := old.(*workspaceapi.Workspace)
if newWS.ResourceVersion == oldWS.ResourceVersion {
return
}
ctl.handleWS(new)
},
DeleteFunc: ctl.handleWS,
})
return ctl
}
func (c *controller) handleWS(obj interface{}) {
ws, ok := obj.(*workspaceapi.Workspace)
if !ok {
// tolerate tombstones and unexpected types from the delete handler
return
}
wsnps, err := c.wsnpLister.List(labels.Everything())
if err != nil {
log.Error(err, "Failed to list WSNPs when a workspace changed")
return
}
for _, wsnp := range wsnps {
log.V(4).Info("Enqueueing wsnp because a workspace changed", "workspace", ws.Name)
c.enqueueWSNP(wsnp)
}
}
func (c *controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
log.Info("Starting WSNP controller")
// Wait for the caches to be synced before starting workers
log.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.wsnpSynced, c.namespaceSynced, c.networkPolicySynced, c.workspaceSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
log.Info("Starting workers")
// Launch two workers to process Foo resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
log.Info("Shutting down workers")
return nil
}
func (c *controller) enqueueWSNP(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}
func (c *controller) handleNP(obj interface{}) {
var object metav1.Object
var ok bool
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
log.V(4).Info("Recovered deleted object from tombstone", "name", object.GetName())
}
log.V(4).Info("Processing object:", "name", object.GetName())
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
if ownerRef.Kind != "WorkspaceNetworkPol" {
return
}
wsnp, err := c.wsnpLister.Get(ownerRef.Name)
if err != nil {
log.V(4).Info("ignoring orphaned object", "link", object.GetSelfLink(), "name", ownerRef.Name)
return
}
c.enqueueWSNP(wsnp)
return
}
}
func (c *controller) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date than when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the reconcile, passing it the namespace/name string of the
// WorkspaceNetworkPolicy resource to be synced.
if err := c.reconcile(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
log.Info("Successfully synced", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// handleError logs a reconcile error and bumps the package-level error counter.
func (c *controller) handleError(err error) {
log.Error(err, "Error in handling")
errCount++
}

View File

@@ -1,203 +0,0 @@
package wsnetworkpolicy
import (
"fmt"
"reflect"
"sort"
corev1 "k8s.io/api/core/v1"
ks8network "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
errutil "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
wsnpapi "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
)
const (
workspaceSelectorLabel = "kubesphere.io/workspace"
workspaceNetworkPolicyLabel = "networking.kubesphere.io/wsnp"
MessageResourceExists = "Resource %q already exists and is not managed by WorkspaceNetworkPolicy"
ErrResourceExists = "ErrResourceExists"
)
var everything = labels.Everything()
var reconcileCount = 0
// NetworkPolicyNameForWSNP return the name of the networkpolicy owned by this WNSP
func NetworkPolicyNameForWSNP(wsnp string) string {
return wsnp + "-np"
}
func (c *controller) reconcile(key string) error {
reconcileCount++
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
olog := log.WithName(name)
olog.Info("Begin to reconcile")
owner, err := c.wsnpLister.Get(name)
if err != nil {
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("WSNP '%s' in work queue no longer exists", key))
return nil
}
return err
}
namespaces, err := c.listNamespacesInWorkspace(owner.Spec.Workspace)
if err != nil {
return err
}
var errs []error
for _, ns := range namespaces {
err = c.reconcileNamespace(ns.Name, owner)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) == 0 {
return nil
}
return errutil.NewAggregate(errs)
}
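reconcile deliberately keeps going after a per-namespace failure and folds everything into one aggregate, so one bad namespace requeues the WSNP without masking errors from the others. A small sketch of the aggregate behavior relied on here (errutil is k8s.io/apimachinery/pkg/util/errors; the messages are illustrative):

errs := []error{
fmt.Errorf("ns1: create failed"),
fmt.Errorf("ns2: update conflict"),
}
// NewAggregate flattens the slice into a single error whose message
// includes both causes, e.g. "[ns1: create failed, ns2: update conflict]".
agg := errutil.NewAggregate(errs)
fmt.Println(agg)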
func (c *controller) reconcileNamespace(name string, wsnp *wsnpapi.WorkspaceNetworkPolicy) error {
npname := NetworkPolicyNameForWSNP(wsnp.Name)
np, err := c.generateNPForNamespace(name, wsnp)
if err != nil {
log.Error(nil, "Failed to generate NetworkPolicy", "wsnp", wsnp, "namespace", name)
return err
}
old, err := c.networkPolicyLister.NetworkPolicies(name).Get(npname)
if errors.IsNotFound(err) {
_, err = c.kubeClientset.NetworkingV1().NetworkPolicies(name).Create(np)
if err != nil {
log.Error(err, "cannot create networkpolicy of this wsnp", wsnp)
return err
}
return nil
}
if err != nil {
log.Error(err, "Failed to get networkPolicy")
return err
}
if !metav1.IsControlledBy(old, wsnp) {
msg := fmt.Sprintf(MessageResourceExists, old.Name)
c.recorder.Event(wsnp, corev1.EventTypeWarning, ErrResourceExists, msg)
return fmt.Errorf("%s", msg)
}
if !reflect.DeepEqual(old.Spec, np.Spec) {
log.V(2).Info("Detected network policy change, updating", "old", old.Spec, "new", np.Spec)
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, err = c.kubeClientset.NetworkingV1().NetworkPolicies(name).Update(np)
return err
})
if err != nil {
log.Error(err, "Failed to update networkpolicy")
return err
}
log.V(2).Info("Update completed")
}
return nil
}
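The update path above resubmits the freshly generated np on every retry; since np was built from scratch it carries no ResourceVersion, so a conservative variant (a sketch, not this commit's code) re-reads the live object inside the retry closure and only carries over the desired spec:

err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
cur, getErr := c.kubeClientset.NetworkingV1().NetworkPolicies(name).Get(npname, metav1.GetOptions{})
if getErr != nil {
return getErr
}
cur.Spec = np.Spec // keep live metadata, replace only the spec
_, updErr := c.kubeClientset.NetworkingV1().NetworkPolicies(name).Update(cur)
return updErr
})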
func (c *controller) generateNPForNamespace(ns string, wsnp *wsnpapi.WorkspaceNetworkPolicy) (*ks8network.NetworkPolicy, error) {
np := &ks8network.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: NetworkPolicyNameForWSNP(wsnp.Name),
Namespace: ns,
Labels: map[string]string{workspaceNetworkPolicyLabel: wsnp.Name},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(wsnp, wsnpapi.SchemeGroupVersion.WithKind("WorkspaceNetworkPolicy")),
},
},
Spec: ks8network.NetworkPolicySpec{
PolicyTypes: wsnp.Spec.PolicyTypes,
},
}
if wsnp.Spec.Ingress != nil {
np.Spec.Ingress = make([]ks8network.NetworkPolicyIngressRule, len(wsnp.Spec.Ingress))
for index, ing := range wsnp.Spec.Ingress {
ingRule, err := c.transformWSNPIngressToK8sIngress(ing)
if err != nil {
return nil, err
}
np.Spec.Ingress[index] = *ingRule
}
}
return np, nil
}
func (c *controller) transformWSNPIngressToK8sIngress(rule wsnpapi.WorkspaceNetworkPolicyIngressRule) (*ks8network.NetworkPolicyIngressRule, error) {
k8srule := &ks8network.NetworkPolicyIngressRule{
Ports: rule.Ports,
From: make([]ks8network.NetworkPolicyPeer, len(rule.From)),
}
for index, f := range rule.From {
k8srule.From[index] = f.NetworkPolicyPeer
if f.WorkspaceSelector != nil {
if f.WorkspaceSelector.Size() == 0 {
k8srule.From[index].NamespaceSelector = &metav1.LabelSelector{}
} else {
selector, err := metav1.LabelSelectorAsSelector(f.WorkspaceSelector)
if err != nil {
log.Error(err, "Failed to convert label selectors")
return nil, err
}
ws, err := c.workspaceLister.List(selector)
if err != nil {
log.Error(err, "Failed to list workspaces")
return nil, err
}
if len(ws) == 0 {
log.Info("ws selector doesnot match anything")
continue
}
if k8srule.From[index].NamespaceSelector == nil {
k8srule.From[index].NamespaceSelector = &metav1.LabelSelector{}
}
if len(ws) == 1 {
if k8srule.From[index].NamespaceSelector.MatchLabels == nil {
k8srule.From[index].NamespaceSelector.MatchLabels = make(map[string]string)
}
k8srule.From[index].NamespaceSelector.MatchLabels[workspaceSelectorLabel] = ws[0].Name
} else {
if k8srule.From[index].NamespaceSelector.MatchExpressions == nil {
k8srule.From[index].NamespaceSelector.MatchExpressions = make([]metav1.LabelSelectorRequirement, 0)
}
re := metav1.LabelSelectorRequirement{
Key: workspaceSelectorLabel,
Operator: metav1.LabelSelectorOpIn,
Values: make([]string, len(ws)),
}
for index, w := range ws {
re.Values[index] = w.Name
}
sort.Strings(re.Values)
k8srule.From[index].NamespaceSelector.MatchExpressions = append(k8srule.From[index].NamespaceSelector.MatchExpressions, re)
}
}
}
}
return k8srule, nil
}
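For concreteness, a hypothetical input/output pair for this transformation: when a WorkspaceSelector matches two workspaces named test and test2, the generated namespace selector carries a single In requirement (values sorted for determinism):

want := metav1.LabelSelectorRequirement{
Key: workspaceSelectorLabel, // "kubesphere.io/workspace"
Operator: metav1.LabelSelectorOpIn,
Values: []string{"test", "test2"},
}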
func (c *controller) listNamespacesInWorkspace(workspace string) ([]*corev1.Namespace, error) {
selector, err := labels.Parse(workspaceSelectorLabel + "==" + workspace)
if err != nil {
log.Error(err, "Failed to parse label selector")
return nil, err
}
namespaces, err := c.namespaceLister.List(selector)
if err != nil {
log.Error(err, "Failed to list namespaces in this workspace")
return nil, err
}
return namespaces, nil
}
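An equivalent way to build that selector (a sketch; the behavior should be identical) is from a labels.Set, which sidesteps string parsing and its error path entirely:

selector := labels.Set{workspaceSelectorLabel: workspace}.AsSelector()
namespaces, err := c.namespaceLister.List(selector)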

View File

@@ -1,21 +0,0 @@
package wsnetworkpolicy
import (
"flag"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
func TestWsnetworkpolicy(t *testing.T) {
klog.InitFlags(nil)
flag.Set("logtostderr", "false")
flag.Set("alsologtostderr", "false")
flag.Set("v", "4")
flag.Parse()
klog.SetOutput(GinkgoWriter)
RegisterFailHandler(Fail)
RunSpecs(t, "Wsnetworkpolicy Suite")
}

View File

@@ -1,242 +0,0 @@
package wsnetworkpolicy
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
corev1 "k8s.io/api/core/v1"
k8snetwork "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
netv1lister "k8s.io/client-go/listers/networking/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
tenant "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/controller/network/controllerapi"
controllertesting "kubesphere.io/kubesphere/pkg/controller/network/testing"
)
var (
fakeControllerBuilder *controllertesting.FakeControllerBuilder
c controllerapi.Controller
npLister netv1lister.NetworkPolicyLister
stopCh chan struct{}
deletePolicy metav1.DeletionPropagation
testName string
)
var _ = Describe("Wsnetworkpolicy", func() {
BeforeEach(func() {
deletePolicy = metav1.DeletePropagationBackground
fakeControllerBuilder = controllertesting.NewFakeControllerBuilder()
informer, k8sinformer := fakeControllerBuilder.NewControllerInformer()
stopCh = make(chan struct{})
c = NewController(fakeControllerBuilder.KubeClient, fakeControllerBuilder.KsClient,
informer.Network().V1alpha1().WorkspaceNetworkPolicies(), k8sinformer.Networking().V1().NetworkPolicies(),
k8sinformer.Core().V1().Namespaces(), informer.Tenant().V1alpha1().Workspaces())
originalController := c.(*controller)
go originalController.wsnpInformer.Informer().Run(stopCh)
go originalController.networkPolicyInformer.Informer().Run(stopCh)
go originalController.namespaceInformer.Informer().Run(stopCh)
go originalController.workspaceInformer.Informer().Run(stopCh)
originalController.recorder = &record.FakeRecorder{}
go c.Run(1, stopCh)
npLister = k8sinformer.Networking().V1().NetworkPolicies().Lister()
testName = "test"
ns1 := newWorkspaceNamespaces("ns1", testName)
ns2 := newWorkspaceNamespaces("ns2", testName)
_, err := fakeControllerBuilder.KubeClient.CoreV1().Namespaces().Create(ns1)
Expect(err).ShouldNot(HaveOccurred())
_, err = fakeControllerBuilder.KubeClient.CoreV1().Namespaces().Create(ns2)
Expect(err).ShouldNot(HaveOccurred())
})
AfterEach(func() {
close(stopCh)
})
It("Should proper ingress rule when using workspaceSelector", func() {
label := map[string]string{"workspace": "test-selector"}
ws := &tenant.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Labels: label,
},
}
_, err := fakeControllerBuilder.KsClient.TenantV1alpha1().Workspaces().Create(ws)
Expect(err).ShouldNot(HaveOccurred())
wsnp := newWorkspaceNP(testName)
wsnp.Spec.PolicyTypes = []k8snetwork.PolicyType{k8snetwork.PolicyTypeIngress}
wsnp.Spec.Ingress = []v1alpha1.WorkspaceNetworkPolicyIngressRule{
{
From: []v1alpha1.WorkspaceNetworkPolicyPeer{
{
WorkspaceSelector: &metav1.LabelSelector{
MatchLabels: label,
},
},
},
}}
_, err = fakeControllerBuilder.KsClient.NetworkV1alpha1().WorkspaceNetworkPolicies().Create(wsnp)
Expect(err).ShouldNot(HaveOccurred())
expect1Json := `{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"name": "test-np",
"namespace": "ns1",
"labels": {
"networking.kubesphere.io/wsnp": "test"
}
},
"spec": {
"policyTypes": [
"Ingress"
],
"ingress": [
{
"from": [
{
"namespaceSelector": {
"matchLabels": {
"kubesphere.io/workspace": "test"
}
}
}
]
}
]
}
}`
expect1 := &k8snetwork.NetworkPolicy{}
Expect(controllertesting.StringToObject(expect1Json, expect1)).ShouldNot(HaveOccurred())
nps := []*k8snetwork.NetworkPolicy{}
Eventually(func() error {
selector, _ := labels.Parse(workspaceNetworkPolicyLabel + "==test")
nps, err = npLister.List(selector)
if err != nil {
klog.Errorf("Failed to list npmerr:%s", err.Error())
return err
}
if len(nps) != 2 {
return fmt.Errorf("Length is not right, current length :%d", len(nps))
}
return nil
}, time.Second*5, time.Second).ShouldNot(HaveOccurred())
for _, np := range nps {
Expect(np.Labels).To(Equal(expect1.Labels))
Expect(np.Spec).To(Equal(expect1.Spec))
}
// creating a new workspace matching the selector will change the `From` clause
ws2 := &tenant.Workspace{
ObjectMeta: metav1.ObjectMeta{
Name: "test2",
Labels: label,
},
}
_, err = fakeControllerBuilder.KsClient.TenantV1alpha1().Workspaces().Create(ws2)
Expect(err).ShouldNot(HaveOccurred())
expect2Json := `{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicy",
"metadata": {
"name": "test-np",
"namespace": "ns1",
"labels": {
"networking.kubesphere.io/wsnp": "test"
}
},
"spec": {
"policyTypes": [
"Ingress"
],
"ingress": [
{
"from": [
{
"namespaceSelector": {
"matchExpressions": [{
"key": "kubesphere.io/workspace",
"operator":"In",
"values": ["test", "test2"]
}]
}
}
]
}
]
}
}`
expect2 := &k8snetwork.NetworkPolicy{}
Expect(controllertesting.StringToObject(expect2Json, expect2)).ShouldNot(HaveOccurred())
id := func(element interface{}) string {
e := element.(*k8snetwork.NetworkPolicy)
return e.Namespace
}
Eventually(func() []*k8snetwork.NetworkPolicy {
selector, _ := labels.Parse(workspaceNetworkPolicyLabel + "=test")
nps, err := npLister.List(selector)
if err != nil {
return nil
}
if len(nps) != 2 {
klog.Errorf("Length is not right, current length :%d", len(nps))
return nil
}
return nps
}, time.Second*5, time.Second).Should(MatchAllElements(id, Elements{
"ns1": PointTo(MatchFields(IgnoreExtras, Fields{
"Spec": Equal(expect2.Spec),
})),
"ns2": PointTo(MatchFields(IgnoreExtras, Fields{
"Spec": Equal(expect2.Spec),
})),
}))
})
It("Should create networkpolicies", func() {
//create a wsnp
_, err := fakeControllerBuilder.KsClient.NetworkV1alpha1().WorkspaceNetworkPolicies().Create(newWorkspaceNP(testName))
Expect(err).ShouldNot(HaveOccurred())
Eventually(func() error {
selector, _ := labels.Parse(workspaceNetworkPolicyLabel + "=" + testName)
nps, err := npLister.List(selector)
if err != nil {
return err
}
if len(nps) != 2 {
return fmt.Errorf("Length is not right, current length :%d", len(nps))
}
return nil
}, time.Second*5, time.Second).ShouldNot(HaveOccurred())
err = fakeControllerBuilder.KsClient.NetworkV1alpha1().WorkspaceNetworkPolicies().Delete(testName, &metav1.DeleteOptions{
PropagationPolicy: &deletePolicy,
})
Expect(err).ShouldNot(HaveOccurred())
})
})
func newWorkspaceNP(name string) *v1alpha1.WorkspaceNetworkPolicy {
return &v1alpha1.WorkspaceNetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1alpha1.WorkspaceNetworkPolicySpec{
Workspace: name,
},
}
}
func newWorkspaceNamespaces(ns, ws string) *corev1.Namespace {
return &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
Labels: map[string]string{workspaceSelectorLabel: ws},
},
}
}