Remove the VolumeSnapshot controller that creates a snapshot class automatically (#5380)

Signed-off-by: dkeven <dkvvven@gmail.com>

Signed-off-by: dkeven <dkvvven@gmail.com>
This commit is contained in:
dkeven
2022-11-24 17:47:48 +08:00
committed by GitHub
parent e0cb0a889b
commit 991e479932
3 changed files with 0 additions and 482 deletions

View File

@@ -52,8 +52,6 @@ import (
ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap"
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/controller/storage/snapshotclass"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/controller/certificatesigningrequest"
@@ -100,7 +98,6 @@ var allControllers = []string{
"destinationrule",
"job",
"storagecapability",
"volumesnapshot",
"pvcautoresizer",
"workloadrestart",
"loginrecord",
@@ -343,16 +340,6 @@ func addAllControllers(mgr manager.Manager, client k8s.Client, informerFactory i
addController(mgr, "storagecapability", storageCapabilityController)
}
// "volumesnapshot" controller
if cmOptions.IsControllerEnabled("volumesnapshot") {
volumeSnapshotController := snapshotclass.NewController(
kubernetesInformer.Storage().V1().StorageClasses(),
client.Snapshot().SnapshotV1().VolumeSnapshotClasses(),
informerFactory.SnapshotSharedInformerFactory().Snapshot().V1().VolumeSnapshotClasses(),
)
addController(mgr, "volumesnapshot", volumeSnapshotController)
}
// "pvc-autoresizer"
monitoringOptionsEnable := cmOptions.MonitoringOptions != nil && len(cmOptions.MonitoringOptions.Endpoint) != 0
if monitoringOptionsEnable {

View File

@@ -1,209 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshotclass
import (
"context"
"fmt"
"strconv"
"time"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1"
snapinformers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions/volumesnapshot/v1"
snapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
storageinformersv1 "k8s.io/client-go/informers/storage/v1"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
)
// annotationAllowSnapshot is the StorageClass annotation that, when set to a
// value strconv.ParseBool accepts as true, opts the class into automatic
// VolumeSnapshotClass creation.
const annotationAllowSnapshot = "storageclass.kubesphere.io/allow-snapshot"

// VolumeSnapshotClassController watches StorageClasses and maintains a
// same-named VolumeSnapshotClass for every class annotated with
// annotationAllowSnapshot, deleting the snapshot class when the storage
// class is removed.
type VolumeSnapshotClassController struct {
	storageClassLister storagelistersv1.StorageClassLister
	// storageClassSynced reports whether the StorageClass informer cache has synced.
	storageClassSynced cache.InformerSynced

	snapshotClassClient snapshotclient.VolumeSnapshotClassInterface
	snapshotClassLister snapshotlisters.VolumeSnapshotClassLister
	// snapshotClassSynced reports whether the VolumeSnapshotClass informer cache has synced.
	snapshotClassSynced cache.InformerSynced

	// snapshotClassWorkQueue holds StorageClass cache keys awaiting reconciliation.
	snapshotClassWorkQueue workqueue.RateLimitingInterface
}
// NewController wires a VolumeSnapshotClassController to the given informers
// and snapshot-class client. It registers StorageClass event handlers that
// enqueue the affected class for reconciliation; reconciliation creates the
// corresponding VolumeSnapshotClass when the StorageClass allows snapshots.
func NewController(
	storageClassInformer storageinformersv1.StorageClassInformer,
	snapshotClassClient snapshotclient.VolumeSnapshotClassInterface,
	snapshotClassInformer snapinformers.VolumeSnapshotClassInformer,
) *VolumeSnapshotClassController {
	c := &VolumeSnapshotClassController{
		storageClassLister:     storageClassInformer.Lister(),
		storageClassSynced:     storageClassInformer.Informer().HasSynced,
		snapshotClassClient:    snapshotClassClient,
		snapshotClassLister:    snapshotClassInformer.Lister(),
		snapshotClassSynced:    snapshotClassInformer.Informer().HasSynced,
		snapshotClassWorkQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "SnapshotClass"),
	}

	storageClassInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    c.enqueueStorageClass,
		DeleteFunc: c.enqueueStorageClass,
		UpdateFunc: func(oldObj, newObj interface{}) {
			// Identical resource versions mean a periodic resync, not a real
			// change; skip those to avoid redundant work.
			if oldObj.(*storagev1.StorageClass).ResourceVersion == newObj.(*storagev1.StorageClass).ResourceVersion {
				return
			}
			c.enqueueStorageClass(newObj)
		},
	})
	return c
}
// Start runs the controller with 5 workers until ctx is cancelled,
// satisfying the controller-runtime Runnable interface.
func (c *VolumeSnapshotClassController) Start(ctx context.Context) error {
	return c.Run(5, ctx.Done())
}
// Run waits for both informer caches to sync, then starts threadCnt worker
// goroutines that drain the work queue until stopCh is closed. It blocks
// until stopCh closes and shuts the queue down on return.
func (c *VolumeSnapshotClassController) Run(threadCnt int, stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer c.snapshotClassWorkQueue.ShutDown()

	klog.Info("Waiting for informer cache to sync.")
	cacheSyncs := []cache.InformerSynced{
		c.storageClassSynced,
		c.snapshotClassSynced,
	}
	if ok := cache.WaitForCacheSync(stopCh, cacheSyncs...); !ok {
		// Typo fixed: "syne" -> "sync".
		return fmt.Errorf("failed to wait for caches to sync")
	}

	for i := 0; i < threadCnt; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	klog.Info("Started workers")
	<-stopCh
	klog.Info("Shutting down workers")
	return nil
}
// enqueueStorageClass derives the cache key for obj and adds it to the work
// queue. Key-derivation failures are reported and the event is dropped.
func (c *VolumeSnapshotClassController) enqueueStorageClass(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.snapshotClassWorkQueue.Add(key)
}
// runWorker processes work-queue items until the queue is shut down.
func (c *VolumeSnapshotClassController) runWorker() {
	for c.processNextWorkItem() {
	}
}
// processNextWorkItem pops one key from the work queue and reconciles it via
// syncHandler. It returns false only once the queue has been shut down, so
// runWorker can loop on it directly.
func (c *VolumeSnapshotClassController) processNextWorkItem() bool {
	obj, shutdown := c.snapshotClassWorkQueue.Get()
	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		// Done must be called so the queue knows processing of obj finished.
		defer c.snapshotClassWorkQueue.Done(obj)
		key, ok := obj.(string)
		if !ok {
			// A non-string item can never be processed; forget it so it is
			// not requeued.
			c.snapshotClassWorkQueue.Forget(obj)
			utilruntime.HandleError(fmt.Errorf("expected string in workQueue but got %#v", obj))
			return nil
		}
		if err := c.syncHandler(key); err != nil {
			// Requeue with rate limiting so transient failures are retried.
			c.snapshotClassWorkQueue.AddRateLimited(key)
			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
		}
		c.snapshotClassWorkQueue.Forget(obj)
		klog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)

	// Both outcomes keep the worker running; the original's duplicated
	// "return true" branches are collapsed into one.
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}
// syncHandler reconciles a single StorageClass identified by its cache key:
//   - StorageClass deleted           -> delete the matching VolumeSnapshotClass.
//   - annotation parses as true      -> create the VolumeSnapshotClass if absent.
//   - annotation missing / not true  -> nothing to do.
func (c *VolumeSnapshotClassController) syncHandler(key string) error {
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key can never become valid; report it and drop the item.
		utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
		return nil
	}

	storageClass, err := c.storageClassLister.Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			// StorageClass has been deleted; remove its VolumeSnapshotClass.
			return c.deleteSnapshotClass(name)
		}
		return err
	}

	// Indexing a nil map is safe in Go, so no explicit nil check is needed.
	annotationSnap, ok := storageClass.Annotations[annotationAllowSnapshot]
	if !ok {
		return nil
	}
	allowSnapshot, err := strconv.ParseBool(annotationSnap)
	if err != nil {
		// Bug fix: the original returned this error, which rate-limit-requeued
		// the key forever even though retrying can never fix a malformed
		// annotation. Log it and treat the class as "snapshots not allowed".
		klog.Warningf("invalid %s annotation %q on StorageClass %s: %v",
			annotationAllowSnapshot, annotationSnap, name, err)
		return nil
	}
	if !allowSnapshot {
		return nil
	}

	// Create the VolumeSnapshotClass only if it does not exist yet.
	if _, err = c.snapshotClassLister.Get(name); err == nil {
		return nil
	}
	if !errors.IsNotFound(err) {
		return err
	}
	volumeSnapshotClassCreate := &snapshotv1.VolumeSnapshotClass{
		ObjectMeta:     metav1.ObjectMeta{Name: name},
		Driver:         storageClass.Provisioner,
		DeletionPolicy: snapshotv1.VolumeSnapshotContentDelete,
	}
	_, err = c.snapshotClassClient.Create(context.Background(), volumeSnapshotClassCreate, metav1.CreateOptions{})
	return err
}
// deleteSnapshotClass removes the VolumeSnapshotClass with the given name.
// A snapshot class that is already gone counts as success.
func (c *VolumeSnapshotClassController) deleteSnapshotClass(name string) error {
	if _, err := c.snapshotClassLister.Get(name); err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
	klog.Infof("Delete SnapshotClass %s", name)
	return c.snapshotClassClient.Delete(context.Background(), name, metav1.DeleteOptions{})
}

View File

@@ -1,260 +0,0 @@
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshotclass
import (
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
snapshotV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
snapFake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake"
snapInformersV1 "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions"
storageV1 "k8s.io/api/storage/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
k8sInformers "k8s.io/client-go/informers"
k8sFake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
)
var (
	// noReSyncPeriodFunc disables periodic informer resync in tests.
	noReSyncPeriodFunc = func() time.Duration { return 0 }
)

// fixture bundles the fake clients, preloaded objects, and expected client
// actions for a single controller test case.
type fixture struct {
	t *testing.T

	//nolint:unused
	snapshotSupported bool

	// Clients
	k8sClient           *k8sFake.Clientset
	snapshotClassClient *snapFake.Clientset
	//nolint:unused
	ksClient *ksfake.Clientset

	// Objects from here preload into NewSimpleFake.
	storageObjects       []runtime.Object // include StorageClass
	snapshotClassObjects []runtime.Object

	// Objects to put in the store.
	storageClassLister  []*storageV1.StorageClass
	snapshotClassLister []*snapshotV1.VolumeSnapshotClass

	// Actions expected to happen on the client.
	actions []core.Action
}
// newFixture returns an empty fixture bound to the given test.
func newFixture(t *testing.T) *fixture {
	return &fixture{t: t}
}
// newController builds the controller under test together with the informer
// factories that back it, preloading the fixture's objects into the fake
// clients and the informer indexers.
func (f *fixture) newController() (*VolumeSnapshotClassController, k8sInformers.SharedInformerFactory, snapInformersV1.SharedInformerFactory) {
	f.k8sClient = k8sFake.NewSimpleClientset(f.storageObjects...)
	f.snapshotClassClient = snapFake.NewSimpleClientset(f.snapshotClassObjects...)

	// Local names chosen so they no longer shadow the imported package
	// aliases k8sInformers / snapInformersV1 (the original declared a local
	// variable named k8sInformers over the import of the same name).
	k8sInformerFactory := k8sInformers.NewSharedInformerFactory(f.k8sClient, noReSyncPeriodFunc())
	snapshotInformerFactory := snapInformersV1.NewSharedInformerFactory(f.snapshotClassClient, noReSyncPeriodFunc())

	c := NewController(
		k8sInformerFactory.Storage().V1().StorageClasses(),
		f.snapshotClassClient.SnapshotV1().VolumeSnapshotClasses(),
		snapshotInformerFactory.Snapshot().V1().VolumeSnapshotClasses(),
	)

	// Seed the informer stores directly; errors are ignored because the fake
	// indexer cannot fail on plain Add in these tests.
	for _, storageClass := range f.storageClassLister {
		_ = k8sInformerFactory.Storage().V1().StorageClasses().Informer().GetIndexer().Add(storageClass)
	}
	for _, snapshotClass := range f.snapshotClassLister {
		_ = snapshotInformerFactory.Snapshot().V1().VolumeSnapshotClasses().Informer().GetIndexer().Add(snapshotClass)
	}
	return c, k8sInformerFactory, snapshotInformerFactory
}
// runController runs syncHandler for scName against a freshly built
// controller and verifies the fake clients observed exactly the expected
// actions (list/watch noise filtered out).
func (f *fixture) runController(scName string, startInformers bool, expectError bool) {
	c, k8sI, snapI := f.newController()
	if startInformers {
		stopCh := make(chan struct{})
		defer close(stopCh)
		k8sI.Start(stopCh)
		snapI.Start(stopCh)
	}

	err := c.syncHandler(scName)
	if !expectError && err != nil {
		f.t.Errorf("error syncing: %v", err)
	} else if expectError && err == nil {
		f.t.Error("expected error syncing, got nil")
	}

	var observed []core.Action
	observed = append(observed, f.snapshotClassClient.Actions()...)
	observed = append(observed, f.k8sClient.Actions()...)
	filtered := filterInformerActions(observed)

	if len(filtered) != len(f.actions) {
		f.t.Errorf("count of actions: differ (-got, +want): %s", cmp.Diff(filtered, f.actions))
		return
	}
	for i, action := range filtered {
		checkAction(f.actions[i], action, f.t)
	}
}
// run executes the controller for scName with informers started and no
// error expected.
func (f *fixture) run(scName string) {
	f.runController(scName, true, false)
}
// expectCreateSnapshotClassAction records that a create of snapshotClass is
// expected on the snapshot-class fake client.
func (f *fixture) expectCreateSnapshotClassAction(snapshotClass *snapshotV1.VolumeSnapshotClass) {
	f.actions = append(f.actions, core.NewCreateAction(
		schema.GroupVersionResource{Resource: "volumesnapshotclasses"}, snapshotClass.Namespace, snapshotClass))
}
// expectDeleteSnapshotClassAction records that a delete of snapshotClass (by
// name) is expected on the snapshot-class fake client.
func (f *fixture) expectDeleteSnapshotClassAction(snapshotClass *snapshotV1.VolumeSnapshotClass) {
	f.actions = append(f.actions, core.NewDeleteAction(
		schema.GroupVersionResource{Resource: "volumesnapshotclasses"}, snapshotClass.Namespace, snapshotClass.Name))
}
// filterInformerActions drops list and watch actions, which informers issue
// on startup and which do not change resource state, to lower the noise
// level in test assertions.
func filterInformerActions(actions []core.Action) []core.Action {
	var ret []core.Action
	for _, action := range actions {
		switch action.GetVerb() {
		case "list", "watch":
			// informer bookkeeping; ignore
		default:
			ret = append(ret, action)
		}
	}
	return ret
}
// checkAction verifies that the expected and actual actions agree on verb,
// resource, subresource, concrete action type, and attached payload.
func checkAction(expected, actual core.Action, t *testing.T) {
	if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
		t.Errorf("\nExpected\n\t%#v\ngot\n\t%#v", expected, actual)
		return
	}
	if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
		// Fixed verb: %T prints the dynamic type; the original's %t is the
		// boolean verb and rendered these values as "%!t(...)".
		t.Errorf("Action has wrong type. Expected: %T. Got: %T", expected, actual)
		return
	}

	switch a := actual.(type) {
	case core.CreateActionImpl:
		e, _ := expected.(core.CreateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		if difference := cmp.Diff(object, expObject); len(difference) > 0 {
			t.Errorf("[CreateAction] %T differ (-got, +want): %s", expObject, difference)
		}
	case core.UpdateActionImpl:
		e, _ := expected.(core.UpdateActionImpl)
		expObject := e.GetObject()
		object := a.GetObject()
		if difference := cmp.Diff(object, expObject); len(difference) > 0 {
			t.Errorf("[UpdateAction] %T differ (-got, +want): %s", expObject, difference)
		}
	case core.PatchActionImpl:
		e, _ := expected.(core.PatchActionImpl)
		expPatch := e.GetPatch()
		patch := a.GetPatch()
		if !reflect.DeepEqual(expPatch, patch) {
			t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
		}
	case core.DeleteActionImpl:
		e, _ := expected.(core.DeleteActionImpl)
		if difference := cmp.Diff(e.Name, a.Name); len(difference) > 0 {
			// Label fixed from "[UpdateAction]" to match the action kind.
			t.Errorf("[DeleteAction] %T differ (-got, +want): %s", e.Name, difference)
		}
	default:
		t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
			actual.GetVerb(), actual.GetResource().Resource)
	}
}
// newStorageClass returns a StorageClass named name with volume expansion
// enabled.
func newStorageClass(name string) *storageV1.StorageClass {
	allowExpansion := true
	sc := &storageV1.StorageClass{
		ObjectMeta: v1.ObjectMeta{Name: name},
	}
	sc.AllowVolumeExpansion = &allowExpansion
	return sc
}
// newSnapshotClass builds the VolumeSnapshotClass the controller is expected
// to create for storageClass: same name, the class's provisioner as driver,
// and a Delete deletion policy.
func newSnapshotClass(storageClass *storageV1.StorageClass) *snapshotV1.VolumeSnapshotClass {
	class := snapshotV1.VolumeSnapshotClass{
		ObjectMeta:     v1.ObjectMeta{Name: storageClass.Name},
		Driver:         storageClass.Provisioner,
		DeletionPolicy: snapshotV1.VolumeSnapshotContentDelete,
	}
	return &class
}
// getKey returns the cache key for sc, failing the test (and returning "")
// if the key cannot be derived.
func getKey(sc *storageV1.StorageClass, t *testing.T) string {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(sc)
	if err == nil {
		return key
	}
	t.Errorf("Unexpected error getting key for %v: %v", sc.Name, err)
	return ""
}
// TestCreateStorageClass verifies that a StorageClass annotated to allow
// snapshots triggers creation of a matching VolumeSnapshotClass.
func TestCreateStorageClass(t *testing.T) {
	f := newFixture(t)

	sc := newStorageClass("csi-example")
	sc.Annotations = map[string]string{annotationAllowSnapshot: "true"}
	snapClass := newSnapshotClass(sc)

	// Preload the StorageClass into the fake client and the lister store.
	f.storageObjects = append(f.storageObjects, sc)
	f.storageClassLister = append(f.storageClassLister, sc)

	// The controller should issue exactly one create for the snapshot class.
	f.expectCreateSnapshotClassAction(snapClass)

	f.run(getKey(sc, t))
}
func TestDeleteStorageClass(t *testing.T) {
storageClass := newStorageClass("csi-example")
snapshotClass := newSnapshotClass(storageClass)
fixture := newFixture(t)
// Object exist
fixture.storageObjects = append(fixture.storageObjects, storageClass)
fixture.snapshotClassObjects = append(fixture.snapshotClassObjects, snapshotClass)
fixture.snapshotClassLister = append(fixture.snapshotClassLister, snapshotClass)
// Action expected
fixture.expectDeleteSnapshotClassAction(snapshotClass)
// Run test
fixture.run(getKey(storageClass, t))
}