pipeline crd

Signed-off-by: runzexia <runzexia@yunify.com>
This commit is contained in:
runzexia
2020-03-25 10:58:39 +08:00
parent 7a00f9e3e4
commit 23c8d71a5a
28 changed files with 3031 additions and 612 deletions

View File

@@ -0,0 +1,276 @@
package pipeline
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
corev1informer "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corev1lister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
devopsinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/devops/v1alpha3"
devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha3"
"kubesphere.io/kubesphere/pkg/constants"
devopsClient "kubesphere.io/kubesphere/pkg/simple/client/devops"
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
"net/http"
"reflect"
"time"
)
/**
DevOps project controller is used to maintain the state of the DevOps project.
*/
// Controller reconciles Pipeline CRD objects: it keeps the pipeline
// configuration stored in the DevOps backend in sync with the Pipeline
// resources stored in the Kubernetes API server.
type Controller struct {
	// client talks to the Kubernetes API server.
	client clientset.Interface
	// kubesphereClient talks to the KubeSphere CRD API (Pipelines).
	kubesphereClient kubesphereclient.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder
	// devOpsProjectLister lists Pipeline objects from the informer cache.
	devOpsProjectLister devopslisters.PipelineLister
	// pipelineSynced reports whether the pipeline informer cache has synced.
	pipelineSynced cache.InformerSynced
	namespaceLister corev1lister.NamespaceLister
	namespaceSynced cache.InformerSynced
	// workqueue holds namespace/name keys of pipelines awaiting reconciliation.
	workqueue workqueue.RateLimitingInterface
	// workerLoopPeriod is the restart interval for each worker goroutine.
	workerLoopPeriod time.Duration
	// devopsClient is the DevOps backend used to create/update/delete pipelines.
	devopsClient devopsClient.Interface
}
// NewController creates a pipeline controller wired to the given clients and
// informers. The returned controller watches Pipeline resources and enqueues
// them for reconciliation; call Run (or Start) to begin processing.
func NewController(client clientset.Interface,
	kubesphereClient kubesphereclient.Interface,
	devopsCli devopsClient.Interface,
	namespaceInformer corev1informer.NamespaceInformer,
	devopsInformer devopsinformers.PipelineInformer) *Controller {
	broadcaster := record.NewBroadcaster()
	// Forward broadcaster output to klog. The args slice must be expanded
	// with "..." — the original passed the whole slice as one operand, which
	// rendered every message as "[arg1 arg2 ...]".
	broadcaster.StartLogging(func(format string, args ...interface{}) {
		klog.Infof(format, args...)
	})
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "pipeline-controller"})
	v := &Controller{
		client:              client,
		devopsClient:        devopsCli,
		kubesphereClient:    kubesphereClient,
		workqueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pipeline"),
		devOpsProjectLister: devopsInformer.Lister(),
		pipelineSynced:      devopsInformer.Informer().HasSynced,
		namespaceLister:     namespaceInformer.Lister(),
		namespaceSynced:     namespaceInformer.Informer().HasSynced,
		workerLoopPeriod:    time.Second,
	}
	v.eventBroadcaster = broadcaster
	v.eventRecorder = recorder
	devopsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: v.enqueuePipeline,
		UpdateFunc: func(oldObj, newObj interface{}) {
			oldPipeline := oldObj.(*devopsv1alpha3.Pipeline)
			newPipeline := newObj.(*devopsv1alpha3.Pipeline)
			// Identical ResourceVersion means this is a periodic resync with
			// no actual change; skip it.
			if oldPipeline.ResourceVersion == newPipeline.ResourceVersion {
				return
			}
			v.enqueuePipeline(newObj)
		},
		DeleteFunc: v.enqueuePipeline,
	})
	return v
}
// enqueuePipeline takes a Pipeline resource and converts it into a
// namespace/name key which is then put onto the workqueue. This method should
// *not* be passed resources of any type other than Pipeline.
// (The original comment referred to "Foo"/"DevOpsProject", a copy-paste leftover.)
func (c *Controller) enqueuePipeline(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		// An object without valid meta cannot be keyed; report and drop it.
		utilruntime.HandleError(err)
		return
	}
	c.workqueue.Add(key)
}
// processNextWorkItem pops one key off the workqueue and reconciles it via
// syncHandler. It returns false only when the queue has been shut down, so
// workers can loop on it.
func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Done must always be called so the queue knows processing finished.
		defer c.workqueue.Done(obj)
		key, ok := obj.(string)
		if !ok {
			// A non-string item can never be processed; Forget it so it is
			// not retried forever.
			c.workqueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		if err := c.syncHandler(key); err != nil {
			// Requeue with rate limiting so transient failures are retried
			// with backoff.
			c.workqueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		// Success: clear any rate-limit history for this key.
		c.workqueue.Forget(obj)
		klog.V(5).Infof("Successfully synced '%s'", key)
		return nil
	}(obj)
	if err != nil {
		// The resource reconciled here is a pipeline; the original message
		// incorrectly said "devopsProject".
		klog.Error(err, "could not reconcile pipeline")
		utilruntime.HandleError(err)
		return true
	}
	return true
}
// worker is a long-running processing loop; it keeps draining the workqueue
// until the queue is shut down.
func (c *Controller) worker() {
	for {
		if !c.processNextWorkItem() {
			return
		}
	}
}
// Start runs the controller with a single worker and blocks until stopCh is
// closed. It is a convenience wrapper around Run.
func (c *Controller) Start(stopCh <-chan struct{}) error {
	return c.Run(1, stopCh)
}
// Run starts the controller: it waits for the informer caches to sync, then
// launches the requested number of worker goroutines and blocks until stopCh
// is closed. The workqueue is shut down on return, which stops the workers.
func (c *Controller) Run(workers int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.workqueue.ShutDown()
	klog.Info("starting pipeline controller")
	defer klog.Info("shutting down pipeline controller")
	// syncHandler reads from both the pipeline and the namespace listers, so
	// both caches must be warm before any worker starts (the original only
	// waited for pipelineSynced).
	if !cache.WaitForCacheSync(stopCh, c.pipelineSynced, c.namespaceSynced) {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	for i := 0; i < workers; i++ {
		go wait.Until(c.worker, c.workerLoopPeriod, stopCh)
	}
	<-stopCh
	return nil
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two: it ensures the finalizer is set on live pipelines, keeps
// the DevOps backend pipeline config in sync with the CRD spec, and on
// deletion removes the backend pipeline before releasing the finalizer.
func (c *Controller) syncHandler(key string) error {
	nsName, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// An unparseable key can never be synced; drop it without requeueing.
		klog.Error(err, fmt.Sprintf("could not split copyPipeline meta %s ", key))
		return nil
	}
	namespace, err := c.namespaceLister.Get(nsName)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Info(fmt.Sprintf("namespace '%s' in work queue no longer exists ", key))
			return nil
		}
		klog.Error(err, fmt.Sprintf("could not get namespace %s ", key))
		return err
	}
	// Pipelines may only live in namespaces administered by a DevOps project.
	if !isDevOpsProjectAdminNamespace(namespace) {
		err := fmt.Errorf("could not create copyPipeline in normal namespaces %s", namespace.Name)
		klog.Warning(err)
		return err
	}
	pipeline, err := c.devOpsProjectLister.Pipelines(nsName).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Info(fmt.Sprintf("copyPipeline '%s' in work queue no longer exists ", key))
			return nil
		}
		klog.Error(err, fmt.Sprintf("could not get copyPipeline %s ", key))
		return err
	}
	copyPipeline := pipeline.DeepCopy()
	// DeletionTimestamp.IsZero() means copyPipeline has not been deleted.
	if copyPipeline.ObjectMeta.DeletionTimestamp.IsZero() {
		// Make sure our finalizer is present so we get a chance to clean up
		// the backend pipeline before the resource disappears.
		// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers
		if !sliceutil.HasString(copyPipeline.ObjectMeta.Finalizers, devopsv1alpha3.PipelineFinalizerName) {
			copyPipeline.ObjectMeta.Finalizers = append(copyPipeline.ObjectMeta.Finalizers, devopsv1alpha3.PipelineFinalizerName)
		}
		// Check whether the pipeline config exists in the DevOps backend;
		// create it if missing, otherwise converge the config.
		jenkinsPipeline, err := c.devopsClient.GetProjectPipelineConfig(nsName, pipeline.Name)
		switch {
		case err == nil:
			if !reflect.DeepEqual(jenkinsPipeline.Spec, copyPipeline.Spec) {
				_, err := c.devopsClient.UpdateProjectPipeline(nsName, copyPipeline)
				if err != nil {
					klog.Error(err, fmt.Sprintf("failed to update pipeline config %s ", key))
					return err
				}
			}
		case devopsClient.GetDevOpsStatusCode(err) != http.StatusNotFound:
			// Any error other than 404 is a real backend failure.
			klog.Error(err, fmt.Sprintf("failed to get copyPipeline %s ", key))
			return err
		default:
			// 404: the backend pipeline has not been created yet.
			_, err := c.devopsClient.CreateProjectPipeline(nsName, copyPipeline)
			if err != nil {
				// The original message said "failed to get" on this create path.
				klog.Error(err, fmt.Sprintf("failed to create copyPipeline %s ", key))
				return err
			}
		}
	} else {
		// Finalizers processing logic: delete the backend pipeline (if it
		// still exists) and then release our finalizer.
		if sliceutil.HasString(copyPipeline.ObjectMeta.Finalizers, devopsv1alpha3.PipelineFinalizerName) {
			_, err := c.devopsClient.GetProjectPipelineConfig(nsName, pipeline.Name)
			if err != nil && devopsClient.GetDevOpsStatusCode(err) != http.StatusNotFound {
				klog.Error(err, fmt.Sprintf("failed to get pipeline %s ", key))
				return err
			} else if err == nil {
				// Backend pipeline exists; remove it before releasing the finalizer.
				if _, err := c.devopsClient.DeleteProjectPipeline(nsName, pipeline.Name); err != nil {
					klog.Error(err, fmt.Sprintf("failed to delete pipeline %s in devops", key))
					return err
				}
			}
			// BUG FIX: the original removed DevOpsProjectFinalizerName here,
			// which left PipelineFinalizerName in place forever and blocked
			// deletion of the resource.
			copyPipeline.ObjectMeta.Finalizers = sliceutil.RemoveString(copyPipeline.ObjectMeta.Finalizers, func(item string) bool {
				return item == devopsv1alpha3.PipelineFinalizerName
			})
		}
	}
	// Persist metadata changes (finalizer add/remove) back to the API server.
	if !reflect.DeepEqual(pipeline, copyPipeline) {
		_, err = c.kubesphereClient.DevopsV1alpha3().Pipelines(nsName).Update(copyPipeline)
		if err != nil {
			klog.Error(err, fmt.Sprintf("failed to update pipeline %s ", key))
			return err
		}
	}
	return nil
}
// isDevOpsProjectAdminNamespace reports whether the namespace carries the
// DevOps project label and is controlled by a DevOpsProject owner reference.
func isDevOpsProjectAdminNamespace(namespace *v1.Namespace) bool {
	if _, labeled := namespace.Labels[constants.DevOpsProjectLabelKey]; !labeled {
		return false
	}
	return k8sutil.IsControlledBy(namespace.OwnerReferences,
		devopsv1alpha3.ResourceKindDevOpsProject, "")
}

View File

@@ -0,0 +1,400 @@
package pipeline
import (
v1 "k8s.io/api/core/v1"
"kubesphere.io/kubesphere/pkg/constants"
fakeDevOps "kubesphere.io/kubesphere/pkg/simple/client/devops/fake"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
devops "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)
var (
	// alwaysReady short-circuits informer cache-sync checks in tests.
	alwaysReady = func() bool { return true }
	// noResyncPeriodFunc disables periodic informer resyncs in tests.
	noResyncPeriodFunc = func() time.Duration { return 0 }
)
// fixture bundles the fake clients, seeded listers, expected client actions,
// and expected DevOps backend state for one controller test case.
type fixture struct {
	t *testing.T
	// client is the fake KubeSphere clientset; kubeclient is the fake
	// Kubernetes clientset.
	client *fake.Clientset
	kubeclient *k8sfake.Clientset
	// Objects to preload into the informer indexers.
	namespaceLister []*v1.Namespace
	pipelineLister []*devops.Pipeline
	// Actions expected against the KubeSphere (actions) and Kubernetes
	// (kubeactions) fake clients.
	actions []core.Action
	kubeactions []core.Action
	kubeobjects []runtime.Object
	// Objects from here preloaded into NewSimpleFake.
	objects []runtime.Object
	// Objects from here preloaded into devops
	initDevOpsProject string
	initPipeline []*devops.Pipeline
	// expectPipeline is the backend state expected after the sync completes.
	expectPipeline []*devops.Pipeline
}
// newFixture returns an empty test fixture bound to t.
func newFixture(t *testing.T) *fixture {
	return &fixture{
		t:       t,
		objects: []runtime.Object{},
	}
}
// newNamespace builds a namespace labelled with and controlled by the DevOps
// project projectName, as the real project controller would create it.
func newNamespace(name string, projectName string) *v1.Namespace {
	isController := true
	return &v1.Namespace{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Namespace",
			APIVersion: v1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: map[string]string{constants.DevOpsProjectLabelKey: projectName},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         devops.SchemeGroupVersion.String(),
					Kind:               devops.ResourceKindDevOpsProject,
					Name:               projectName,
					BlockOwnerDeletion: &isController,
					Controller:         &isController,
				},
			},
		},
	}
}
// newPipeline builds a Pipeline object with the given spec; when
// withFinalizers is set, the controller finalizer is attached, mimicking an
// already-reconciled object.
func newPipeline(namespace, name string, spec devops.PipelineSpec, withFinalizers bool) *devops.Pipeline {
	var finalizers []string
	if withFinalizers {
		finalizers = append(finalizers, devops.PipelineFinalizerName)
	}
	return &devops.Pipeline{
		TypeMeta: metav1.TypeMeta{
			Kind:       devops.ResourceKindPipeline,
			APIVersion: devops.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  namespace,
			Name:       name,
			Finalizers: finalizers,
		},
		Spec:   spec,
		Status: devops.PipelineStatus{},
	}
}
// newDeletingPipeline builds a Pipeline that is mid-deletion: it carries a
// deletion timestamp and still holds the controller finalizer.
func newDeletingPipeline(namespace, name string) *devops.Pipeline {
	deletedAt := metav1.Now()
	return &devops.Pipeline{
		TypeMeta: metav1.TypeMeta{
			Kind:       devops.ResourceKindPipeline,
			APIVersion: devops.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace:         namespace,
			Name:              name,
			DeletionTimestamp: &deletedAt,
			Finalizers:        []string{devops.PipelineFinalizerName},
		},
	}
}
// newController wires a Controller up to fake clients, shared informer
// factories, and a fake DevOps backend, pre-populating the informer indexers
// from the fixture's listers.
func (f *fixture) newController() (*Controller, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory, *fakeDevOps.Devops) {
	f.client = fake.NewSimpleClientset(f.objects...)
	f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
	i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc())
	k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
	dI := fakeDevOps.NewWithPipelines(f.initDevOpsProject, f.initPipeline...)
	c := NewController(f.kubeclient, f.client, dI, k8sI.Core().V1().Namespaces(),
		i.Devops().V1alpha3().Pipelines())
	c.pipelineSynced = alwaysReady
	c.eventRecorder = &record.FakeRecorder{}
	// Seed the informer indexers directly. NOTE: the original loop shadowed
	// the fixture receiver f with the loop variable; renamed for clarity.
	for _, p := range f.pipelineLister {
		i.Devops().V1alpha3().Pipelines().Informer().GetIndexer().Add(p)
	}
	for _, ns := range f.namespaceLister {
		k8sI.Core().V1().Namespaces().Informer().GetIndexer().Add(ns)
	}
	return c, i, k8sI, dI
}
// run syncs the pipeline with key fooName and expects the sync to succeed.
func (f *fixture) run(fooName string) {
	f.runController(fooName, true, false)
}
// runExpectError syncs the pipeline with key fooName and expects the sync to
// return an error.
func (f *fixture) runExpectError(fooName string) {
	f.runController(fooName, true, true)
}
// runController builds the controller from the fixture, optionally starts the
// informer factories, runs one sync for projectName, and then verifies both
// the recorded client actions and the fake DevOps backend state.
func (f *fixture) runController(projectName string, startInformers bool, expectError bool) {
	c, i, k8sI, dI := f.newController()
	if startInformers {
		stopCh := make(chan struct{})
		defer close(stopCh)
		i.Start(stopCh)
		k8sI.Start(stopCh)
	}
	err := c.syncHandler(projectName)
	if !expectError && err != nil {
		f.t.Errorf("error syncing foo: %v", err)
	} else if expectError && err == nil {
		f.t.Error("expected error syncing foo, got nil")
	}
	// Compare KubeSphere client actions against the expected list.
	actions := filterInformerActions(f.client.Actions())
	for i, action := range actions {
		if len(f.actions) < i+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
			break
		}
		expectedAction := f.actions[i]
		checkAction(expectedAction, action, f.t)
	}
	// Compare Kubernetes client actions against the expected list.
	k8sActions := filterInformerActions(f.kubeclient.Actions())
	for i, action := range k8sActions {
		if len(f.kubeactions) < i+1 {
			f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
			break
		}
		expectedAction := f.kubeactions[i]
		checkAction(expectedAction, action, f.t)
	}
	if len(f.kubeactions) > len(k8sActions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
	}
	if len(f.actions) > len(actions) {
		f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
	}
	// Verify the fake DevOps backend ended up with exactly the expected
	// pipelines. BUG FIX: the original printed dI.Projects here even though
	// the check is about pipelines.
	if len(dI.Pipelines[f.initDevOpsProject]) != len(f.expectPipeline) {
		f.t.Errorf(" unexpected objects: %v", dI.Pipelines[f.initDevOpsProject])
	}
	for _, pipeline := range f.expectPipeline {
		actualPipeline := dI.Pipelines[f.initDevOpsProject][pipeline.Name]
		if !reflect.DeepEqual(actualPipeline, pipeline) {
			f.t.Errorf(" pipeline %+v not match %+v", pipeline, actualPipeline)
		}
	}
}
// checkAction verifies that expected and actual actions are equal and both
// have the same attached resources.
func checkAction(expected, actual core.Action, t *testing.T) {
	if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
		t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
		return
	}
	// BUG FIX: the original used %t (boolean verb) where %T (type) was intended.
	if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
		t.Errorf("Action has wrong type. Expected: %T. Got: %T", expected, actual)
		return
	}
	switch a := actual.(type) {
	case core.CreateActionImpl:
		e, _ := expected.(core.CreateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		if !reflect.DeepEqual(expObject, object) {
			t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
		}
	case core.UpdateActionImpl:
		e, _ := expected.(core.UpdateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		if !reflect.DeepEqual(expObject, object) {
			t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
		}
	case core.PatchActionImpl:
		e, _ := expected.(core.PatchActionImpl)
		expPatch := e.GetPatch()
		patch := a.GetPatch()
		if !reflect.DeepEqual(expPatch, patch) {
			t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
		}
	default:
		t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
			actual.GetVerb(), actual.GetResource().Resource)
	}
}
// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state, dropping them lowers the
// noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
	ret := []core.Action{}
	for _, action := range actions {
		clusterScoped := len(action.GetNamespace()) == 0
		listOrWatch := action.Matches("list", devops.ResourcePluralPipeline) ||
			action.Matches("watch", devops.ResourcePluralPipeline) ||
			action.Matches("list", "namespaces") ||
			action.Matches("watch", "namespaces")
		if clusterScoped && listOrWatch {
			continue
		}
		ret = append(ret, action)
	}
	return ret
}
// expectUpdatePipelineAction records an expected "update pipelines" action
// against the KubeSphere fake client for pipeline p.
func (f *fixture) expectUpdatePipelineAction(p *devops.Pipeline) {
	gvr := schema.GroupVersionResource{
		Group:    devops.SchemeGroupVersion.Group,
		Version:  devops.SchemeGroupVersion.Version,
		Resource: devops.ResourcePluralPipeline,
	}
	f.actions = append(f.actions, core.NewUpdateAction(gvr, p.Namespace, p))
}
// getKey returns the namespace/name workqueue key for p, failing the test and
// returning "" on error.
func getKey(p *devops.Pipeline, t *testing.T) string {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(p)
	if err == nil {
		return key
	}
	t.Errorf("Unexpected error getting key for pipeline %v: %v", p.Name, err)
	return ""
}
// TestDoNothing verifies that a fully reconciled pipeline (finalizer present,
// backend already in sync) produces no client actions.
func TestDoNothing(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	pipeline := newPipeline("test-123", "test", devops.PipelineSpec{}, true)

	f.pipelineLister = []*devops.Pipeline{pipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{pipeline}
	f.initDevOpsProject = "test-123"
	f.initPipeline = []*devops.Pipeline{pipeline}
	f.expectPipeline = []*devops.Pipeline{pipeline}

	f.run(getKey(pipeline, t))
}
// TestAddPipelineFinalizers verifies that a pipeline lacking the controller
// finalizer gets it added via an update against the KubeSphere API.
func TestAddPipelineFinalizers(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	pipeline := newPipeline("test-123", "test", devops.PipelineSpec{}, false)
	expectPipeline := newPipeline("test-123", "test", devops.PipelineSpec{}, true)

	f.pipelineLister = []*devops.Pipeline{pipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{pipeline}
	f.initDevOpsProject = "test-123"
	f.initPipeline = []*devops.Pipeline{pipeline}
	f.expectPipeline = []*devops.Pipeline{pipeline}
	f.expectUpdatePipelineAction(expectPipeline)

	f.run(getKey(pipeline, t))
}
// TestCreatePipeline verifies that a pipeline missing from the DevOps backend
// is created there during reconciliation.
func TestCreatePipeline(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	pipeline := newPipeline("test-123", "test", devops.PipelineSpec{}, true)

	f.pipelineLister = []*devops.Pipeline{pipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{pipeline}
	f.initDevOpsProject = "test-123"
	f.expectPipeline = []*devops.Pipeline{pipeline}

	f.run(getKey(pipeline, t))
}
// TestDeletePipeline verifies that a pipeline being deleted is removed from
// the DevOps backend.
func TestDeletePipeline(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	pipeline := newDeletingPipeline("test-123", "test")

	f.pipelineLister = []*devops.Pipeline{pipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{pipeline}
	f.initDevOpsProject = "test-123"
	f.initPipeline = []*devops.Pipeline{pipeline}
	f.expectPipeline = []*devops.Pipeline{}

	f.run(getKey(pipeline, t))
}
// TestDeleteNotExistPipeline verifies that deleting a pipeline whose backend
// counterpart is already gone still completes without error.
func TestDeleteNotExistPipeline(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	pipeline := newDeletingPipeline("test-123", "test")

	f.pipelineLister = []*devops.Pipeline{pipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{pipeline}
	f.initDevOpsProject = "test-123"
	f.initPipeline = []*devops.Pipeline{}
	f.expectPipeline = []*devops.Pipeline{}

	f.run(getKey(pipeline, t))
}
// TestUpdatePipelineConfig verifies that a spec drift between the CRD and the
// DevOps backend triggers an update of the backend pipeline config.
func TestUpdatePipelineConfig(t *testing.T) {
	f := newFixture(t)
	ns := newNamespace("test-123", "test_project")
	initPipeline := newPipeline("test-123", "test", devops.PipelineSpec{}, true)
	expectPipeline := newPipeline("test-123", "test", devops.PipelineSpec{Type: "aa"}, true)

	f.pipelineLister = []*devops.Pipeline{expectPipeline}
	f.namespaceLister = []*v1.Namespace{ns}
	f.objects = []runtime.Object{expectPipeline}
	f.initDevOpsProject = "test-123"
	f.initPipeline = []*devops.Pipeline{initPipeline}
	f.expectPipeline = []*devops.Pipeline{expectPipeline}

	f.run(getKey(expectPipeline, t))
}