Add golangci-lint workflow (#4999)

* fix lint workflow

* add golang lint

* close http response body
This commit is contained in:
andrew_li
2022-06-29 11:58:36 +08:00
committed by GitHub
parent f1e06466df
commit f289795312
141 changed files with 311 additions and 592 deletions

View File

@@ -262,7 +262,7 @@ func (c *Controller) Approve(csr *certificatesv1.CertificateSigningRequest) erro
}
// approve csr
csr, err := c.k8sclient.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.Background(), csr.Name, csr, metav1.UpdateOptions{})
_, err := c.k8sclient.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.Background(), csr.Name, csr, metav1.UpdateOptions{})
if err != nil {
klog.Errorln(err)
return err

View File

@@ -79,25 +79,16 @@ const (
maxRetries = 15
kubefedNamespace = "kube-federation-system"
openpitrixRuntime = "openpitrix.io/runtime"
kubesphereManaged = "kubesphere.io/managed"
// Actually the host cluster name can be anything; it is only needed when calling the JoinFederation function
hostClusterName = "kubesphere"
// allocate kubernetesAPIServer port in range [portRangeMin, portRangeMax] for agents if port is not specified
// kubesphereAPIServer port is defaulted to kubernetesAPIServerPort + 10000
portRangeMin = 6000
portRangeMax = 7000
// proxy format
proxyFormat = "%s/api/v1/namespaces/kubesphere-system/services/:ks-apiserver:80/proxy/%s"
// multicluster configuration name
configzMultiCluster = "multicluster"
// probe cluster timeout
probeClusterTimeout = 3 * time.Second
)
// Cluster template for reconcile host cluster if there is none.
@@ -372,7 +363,7 @@ func (c *clusterController) syncCluster(key string) error {
// currently we didn't set cluster.Spec.Enable when creating cluster at client side, so only check
// if we enable cluster.Spec.JoinFederation now
if cluster.Spec.JoinFederation == false {
if !cluster.Spec.JoinFederation {
klog.V(5).Infof("Skipping to join cluster %s cause it is not expected to join", cluster.Name)
return nil
}
@@ -590,6 +581,8 @@ func (c *clusterController) tryToFetchKubeSphereComponents(host string, transpor
return nil, err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
klog.V(4).Infof("Response status code isn't 200.")
return nil, fmt.Errorf("response code %d", response.StatusCode)
@@ -616,6 +609,8 @@ func (c *clusterController) tryFetchKubeSphereVersion(host string, transport htt
return "", err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
klog.V(4).Infof("Response status code isn't 200.")
return "", fmt.Errorf("response code %d", response.StatusCode)
@@ -671,16 +666,6 @@ func (c *clusterController) handleErr(err error, key interface{}) {
utilruntime.HandleError(err)
}
// isConditionTrue checks cluster specific condition value is True, return false if condition not exists
func isConditionTrue(cluster *clusterv1alpha1.Cluster, conditionType clusterv1alpha1.ClusterConditionType) bool {
for _, condition := range cluster.Status.Conditions {
if condition.Type == conditionType && condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
// updateClusterCondition updates condition in cluster conditions using giving condition
// adds condition if not existed
func (c *clusterController) updateClusterCondition(cluster *clusterv1alpha1.Cluster, condition clusterv1alpha1.ClusterCondition) {

View File

@@ -403,8 +403,6 @@ func (v *DestinationRuleController) addDeployment(obj interface{}) {
for key := range services {
v.queue.Add(key)
}
return
}
func (v *DestinationRuleController) deleteDeployment(obj interface{}) {

View File

@@ -76,7 +76,8 @@ type Controller struct {
// Kubernetes API.
recorder record.EventRecorder
multiClusterEnabled bool
devopsClient devops.Interface
//nolint:unused
devopsClient devops.Interface
}
func NewController(k8sClient kubernetes.Interface, ksClient kubesphere.Interface,

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@@ -62,7 +61,6 @@ const (
type Controller struct {
controller.BaseController
scheme *runtime.Scheme
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
groupInformer iamv1alpha2informers.GroupInformer
@@ -220,7 +218,7 @@ func (c *Controller) reconcile(key string) error {
return item == finalizer
})
if group, err = c.ksClient.IamV1alpha2().Groups().Update(context.Background(), group, metav1.UpdateOptions{}); err != nil {
if _, err = c.ksClient.IamV1alpha2().Groups().Update(context.Background(), group, metav1.UpdateOptions{}); err != nil {
return err
}
}

View File

@@ -44,7 +44,6 @@ import (
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
)
@@ -152,6 +151,7 @@ func (f *fixture) run(userName string) {
f.runController(userName, true, false)
}
//nolint:unused
func (f *fixture) runExpectError(userName string) {
f.runController(userName, true, true)
}

View File

@@ -25,7 +25,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@@ -59,7 +58,6 @@ const (
type Controller struct {
controller.BaseController
scheme *runtime.Scheme
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
groupBindingLister iamv1alpha2listers.GroupBindingLister
@@ -137,7 +135,7 @@ func (c *Controller) reconcile(key string) error {
}
}
if g != nil {
if groupBinding, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), g, metav1.UpdateOptions{}); err != nil {
if _, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), g, metav1.UpdateOptions{}); err != nil {
return err
}
// Skip reconcile when group is updated.
@@ -156,7 +154,7 @@ func (c *Controller) reconcile(key string) error {
return item == finalizer
})
if groupBinding, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), groupBinding, metav1.UpdateOptions{}); err != nil {
if _, err = c.ksClient.IamV1alpha2().GroupBindings().Update(context.Background(), groupBinding, metav1.UpdateOptions{}); err != nil {
return err
}
}

View File

@@ -59,7 +59,8 @@ type fixture struct {
ksclient *fake.Clientset
k8sclient *k8sfake.Clientset
// Objects to put in the store.
groupBindingLister []*v1alpha2.GroupBinding
groupBindingLister []*v1alpha2.GroupBinding
//nolint:unused
fedgroupBindingLister []*fedv1beta1types.FederatedGroupBinding
userLister []*v1alpha2.User
// Actions expected to happen on the client.
@@ -163,6 +164,7 @@ func (f *fixture) run(userName string) {
f.runController(userName, true, false)
}
//nolint:unused
func (f *fixture) runExpectError(userName string) {
f.runController(userName, true, true)
}

View File

@@ -24,7 +24,6 @@ import (
"k8s.io/client-go/rest"
"k8s.io/klog/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
@@ -40,7 +39,6 @@ import (
var cfg *rest.Config
var testEnv *envtest.Environment
var k8sClient client.Client
func TestApplicationController(t *testing.T) {
RegisterFailHandler(Fail)

View File

@@ -37,7 +37,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
@@ -52,9 +51,7 @@ const (
)
type JobController struct {
client clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
client clientset.Interface
jobLister batchv1listers.JobLister
jobSynced cache.InformerSynced
@@ -172,16 +169,6 @@ func (v *JobController) syncJob(key string) error {
return nil
}
// When a job is added, figure out which service will use it
// and enqueue it. obj must have *batchv1.Job type
func (v *JobController) addJob(obj interface{}) {
deploy := obj.(*batchv1.Job)
v.queue.Add(deploy.Name)
return
}
func (v *JobController) handleErr(err error, key interface{}) {
if err == nil {
v.queue.Forget(key)

View File

@@ -38,11 +38,13 @@ var (
)
type fixture struct {
t *testing.T
kubeclient *k8sfake.Clientset
t *testing.T
kubeclient *k8sfake.Clientset
//nolint:unused
jobController *JobController
jobLister []*batchv1.Job
//nolint:unused
kubeactions []core.Action
actions []core.Action

View File

@@ -56,9 +56,7 @@ type loginRecordController struct {
k8sClient kubernetes.Interface
ksClient kubesphere.Interface
loginRecordLister iamv1alpha2listers.LoginRecordLister
loginRecordSynced cache.InformerSynced
userLister iamv1alpha2listers.UserLister
userSynced cache.InformerSynced
loginHistoryRetentionPeriod time.Duration
loginHistoryMaximumEntries int
// recorder is an event recorder for recording Event resources to the

View File

@@ -48,10 +48,6 @@ func TestIPPoolSuit(t *testing.T) {
RunSpecs(t, "IPPool Suite")
}
var (
alwaysReady = func() bool { return true }
)
var _ = Describe("test ippool", func() {
pool := &v1alpha1.IPPool{
TypeMeta: v1.TypeMeta{},
@@ -138,11 +134,7 @@ var _ = Describe("test ippool", func() {
Eventually(func() bool {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
if result.Status.Allocations != 1 {
return false
}
return true
return result.Status.Allocations == 1
}, 3*time.Second).Should(Equal(true))
})
@@ -153,11 +145,7 @@ var _ = Describe("test ippool", func() {
ipamClient.ReleaseByHandle("testhandle")
Eventually(func() bool {
result, _ := ksclient.NetworkV1alpha1().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
if result.Status.Allocations != 0 {
return false
}
return true
return result.Status.Allocations == 0
}, 3*time.Second).Should(Equal(true))
err := ksclient.NetworkV1alpha1().IPPools().Delete(context.TODO(), pool.Name, v1.DeleteOptions{})

View File

@@ -28,7 +28,6 @@ import (
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/util/yaml"
kubeinformers "k8s.io/client-go/informers"
informerv1 "k8s.io/client-go/informers/core/v1"
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog"
@@ -37,21 +36,15 @@ import (
ksfake "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
nsnppolicyinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
workspaceinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider"
options "kubesphere.io/kubesphere/pkg/simple/client/network"
)
var (
c *NSNetworkPolicyController
stopCh chan struct{}
nsnpInformer nsnppolicyinformer.NamespaceNetworkPolicyInformer
serviceInformer informerv1.ServiceInformer
workspaceInformer workspaceinformer.WorkspaceInformer
namespaceInformer informerv1.NamespaceInformer
alwaysReady = func() bool { return true }
c *NSNetworkPolicyController
stopCh chan struct{}
alwaysReady = func() bool { return true }
)
const (

View File

@@ -89,6 +89,7 @@ func (c *k8sPolicyController) run(threadiness int, reconcilerPeriod string, stop
// Wait until we are in sync with the Kubernetes API before starting the
// resource cache.
klog.Info("Waiting to sync with Kubernetes API (NetworkPolicy)")
//nolint:staticcheck
if ok := cache.WaitForCacheSync(stopCh, c.hasSynced); !ok {
}
klog.Infof("Finished syncing with Kubernetes API (NetworkPolicy)")

View File

@@ -279,13 +279,13 @@ func (c *Controller) multiClusterSync(ctx context.Context, obj client.Object) er
return err
}
switch obj.(type) {
switch obj := obj.(type) {
case *v2beta1.Config:
return c.syncFederatedConfig(obj.(*v2beta1.Config))
return c.syncFederatedConfig(obj)
case *v2beta1.Receiver:
return c.syncFederatedReceiver(obj.(*v2beta1.Receiver))
return c.syncFederatedReceiver(obj)
case *corev1.Secret:
return c.syncFederatedSecret(obj.(*corev1.Secret))
return c.syncFederatedSecret(obj)
default:
klog.Errorf("unknown type for notification, %v", obj)
return nil

View File

@@ -55,9 +55,6 @@ var (
v2beta1.AddToScheme(scheme.Scheme)
apis.AddToScheme(scheme.Scheme)
const timeout = time.Second * 30
const interval = time.Second * 1
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@@ -100,6 +97,7 @@ var (
)
BeforeEach(func() {
k8sClient = fakek8s.NewSimpleClientset()
//nolint:staticcheck
cl = fake.NewFakeClientWithScheme(scheme.Scheme)
informerCacheCtx = context.TODO()
ksCache = &fakeCache{

View File

@@ -41,10 +41,6 @@ func init() {
registerMetrics()
}
const (
helmApplicationControllerName = "helm-application-controller"
)
var _ reconcile.Reconciler = &ReconcileHelmApplication{}
// ReconcileHelmApplication reconciles a federated helm application object

View File

@@ -96,10 +96,7 @@ var _ = Describe("helmApplication", func() {
Eventually(func() bool {
var ver v1alpha1.HelmApplicationVersion
err := k8sClient.Get(context.Background(), types.NamespacedName{Name: appVer2.Name}, &ver)
if apierrors.IsNotFound(err) {
return true
}
return false
return apierrors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
By("Active app version exists")

View File

@@ -53,7 +53,7 @@ func (r *ReconcileHelmApplicationVersion) Reconcile(ctx context.Context, request
start := time.Now()
klog.V(4).Infof("sync helm application version: %s", request.String())
defer func() {
klog.V(4).Infof("sync helm application version end: %s, elapsed: %v", request.String(), time.Now().Sub(start))
klog.V(4).Infof("sync helm application version end: %s, elapsed: %v", request.String(), time.Since(start))
}()
appVersion := &v1alpha1.HelmApplicationVersion{}
@@ -97,10 +97,7 @@ func (r *ReconcileHelmApplicationVersion) Reconcile(ctx context.Context, request
// Delete HelmApplicationVersion
appVersion.ObjectMeta.Finalizers = sliceutil.RemoveString(appVersion.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmAppVersionFinalizer {
return true
}
return false
return item == HelmAppVersionFinalizer
})
if err := r.Update(context.Background(), appVersion); err != nil {
return reconcile.Result{}, err

View File

@@ -25,8 +25,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -178,8 +176,6 @@ var _ reconcile.Reconciler = &ReconcileHelmCategory{}
type ReconcileHelmCategory struct {
client.Client
//Scheme *runtime.Scheme
recorder record.EventRecorder
config *rest.Config
}
func (r *ReconcileHelmCategory) SetupWithManager(mgr ctrl.Manager) error {
@@ -195,7 +191,7 @@ func (r *ReconcileHelmCategory) Reconcile(ctx context.Context, request reconcile
start := time.Now()
klog.V(4).Infof("sync helm category: %s", request.String())
defer func() {
klog.V(4).Infof("sync helm category end: %s, elapsed: %v", request.String(), time.Now().Sub(start))
klog.V(4).Infof("sync helm category end: %s, elapsed: %v", request.String(), time.Since(start))
}()
instance := &v1alpha1.HelmCategory{}
@@ -237,10 +233,7 @@ func (r *ReconcileHelmCategory) Reconcile(ctx context.Context, request reconcile
}
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmCategoryFinalizer {
return true
}
return false
return item == HelmCategoryFinalizer
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err
@@ -297,8 +290,7 @@ func (r *ReconcileHelmCategory) updateCategoryCount(id string) error {
func (r *ReconcileHelmCategory) countApplications(id string) (int, error) {
list := v1alpha1.HelmApplicationList{}
var err error
err = r.List(context.TODO(), &list, client.MatchingLabels{
err := r.List(context.TODO(), &list, client.MatchingLabels{
constants.CategoryIdLabelKey: id,
constants.ChartRepoIdLabelKey: v1alpha1.AppStoreRepoId,
})

View File

@@ -39,7 +39,7 @@ func (r *ReconcileHelmRelease) GetChartData(rls *v1alpha1.HelmRelease) (chartNam
return chartName, chartData, ErrGetRepoFailed
}
index, err := helmrepoindex.ByteArrayToSavedIndex([]byte(repo.Status.Data))
index, _ := helmrepoindex.ByteArrayToSavedIndex([]byte(repo.Status.Data))
if version := index.GetApplicationVersion(rls.Spec.ApplicationId, rls.Spec.ApplicationVersionId); version != nil {
url := version.Spec.URLs[0]

View File

@@ -27,8 +27,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -68,10 +66,8 @@ type ReconcileHelmRelease struct {
StorageClient s3.Interface
KsFactory externalversions.SharedInformerFactory
client.Client
recorder record.EventRecorder
// mock helm install && uninstall
helmMock bool
informer cache.SharedIndexInformer
checkReleaseStatusBackoff *flowcontrol.Backoff
clusterClients clusterclient.ClusterClients
@@ -163,10 +159,7 @@ func (r *ReconcileHelmRelease) Reconcile(ctx context.Context, request reconcile.
klog.V(3).Infof("remove helm release %s finalizer", instance.Name)
// remove finalizer
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmReleaseFinalizer {
return true
}
return false
return item == HelmReleaseFinalizer
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err

View File

@@ -102,7 +102,7 @@ func (r *ReconcileHelmRepo) Reconcile(ctx context.Context, request reconcile.Req
start := time.Now()
klog.Infof("sync repo: %s", request.Name)
defer func() {
klog.Infof("sync repo end: %s, elapsed: %v", request.Name, time.Now().Sub(start))
klog.Infof("sync repo end: %s, elapsed: %v", request.Name, time.Since(start))
}()
// Fetch the helmrepoes instance
instance := &v1alpha1.HelmRepo{}
@@ -137,10 +137,7 @@ func (r *ReconcileHelmRepo) Reconcile(ctx context.Context, request reconcile.Req
if sliceutil.HasString(instance.ObjectMeta.Finalizers, HelmRepoFinalizer) {
// remove our finalizer from the list and update it.
instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool {
if item == HelmRepoFinalizer {
return true
}
return false
return item == HelmRepoFinalizer
})
if err := r.Update(context.Background(), instance); err != nil {
return reconcile.Result{}, err

View File

@@ -174,9 +174,8 @@ func (a *accessor) GetQuotas(namespaceName string) ([]corev1.ResourceQuota, erro
func (a *accessor) waitForReadyResourceQuotaNames(namespaceName string) ([]string, error) {
ctx := context.TODO()
var resourceQuotaNames []string
var err error
// wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds.
err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
err := utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
resourceQuotaNames, err = resourceQuotaNamesFor(ctx, a.client, namespaceName)
// if we can't find the namespace yet, just wait for the cache to update. Requests to non-existent namespaces
// may hang, but those people are doing something wrong and namespace lifecycle should reject them.
@@ -194,9 +193,8 @@ func (a *accessor) waitForReadyResourceQuotaNames(namespaceName string) ([]strin
func (a *accessor) waitForReadyNamespacedResourceQuotas(namespaceName string) ([]corev1.ResourceQuota, error) {
ctx := context.TODO()
var resourceQuotas []corev1.ResourceQuota
var err error
// wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds.
err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
err := utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
resourceQuotaList := &corev1.ResourceQuotaList{}
err = a.client.List(ctx, resourceQuotaList, &client.ListOptions{Namespace: namespaceName})
if err != nil {

View File

@@ -85,6 +85,7 @@ var _ = Describe("ServiceAccount", func() {
ctx := context.Background()
reconciler := &Reconciler{
//nolint:staticcheck
Client: fake.NewFakeClientWithScheme(scheme.Scheme),
logger: ctrl.Log.WithName("controllers").WithName("acrpullbinding-controller"),
scheme: scheme.Scheme,
@@ -109,6 +110,7 @@ var _ = Describe("ServiceAccount", func() {
ctx := context.Background()
reconciler := &Reconciler{
//nolint:staticcheck
Client: fake.NewFakeClientWithScheme(scheme.Scheme),
logger: ctrl.Log.WithName("controllers").WithName("acrpullbinding-controller"),
scheme: scheme.Scheme,

View File

@@ -154,7 +154,6 @@ func (c *StorageCapabilityController) enqueueStorageClassByCSI(csi interface{})
c.enqueueStorageClass(obj)
}
}
return
}
func (c *StorageCapabilityController) runWorker() {
@@ -246,7 +245,6 @@ func (c *StorageCapabilityController) updateSnapshotAnnotation(storageClass *sto
if _, err := strconv.ParseBool(storageClass.Annotations[annotationAllowSnapshot]); err != nil {
storageClass.Annotations[annotationAllowSnapshot] = strconv.FormatBool(snapshotAllow)
}
return
}
func (c *StorageCapabilityController) updateCloneVolumeAnnotation(storageClass *storagev1.StorageClass, cloneAllow bool) {
@@ -256,7 +254,6 @@ func (c *StorageCapabilityController) updateCloneVolumeAnnotation(storageClass *
if _, err := strconv.ParseBool(storageClass.Annotations[annotationAllowClone]); err != nil {
storageClass.Annotations[annotationAllowClone] = strconv.FormatBool(cloneAllow)
}
return
}
func (c *StorageCapabilityController) removeAnnotations(storageClass *storagev1.StorageClass) {

View File

@@ -47,7 +47,8 @@ type fixture struct {
snapshotSupported bool
// Clients
k8sClient *k8sfake.Clientset
ksClient *ksfake.Clientset
//nolint:unused
ksClient *ksfake.Clientset
// Objects from here preload into NewSimpleFake.
storageObjects []runtime.Object // include StorageClass
// Objects to put in the store.

View File

@@ -45,12 +45,14 @@ var (
)
type fixture struct {
t *testing.T
t *testing.T
//nolint:unused
snapshotSupported bool
// Clients
k8sClient *k8sFake.Clientset
snapshotClassClient *snapFake.Clientset
ksClient *ksfake.Clientset
//nolint:unused
ksClient *ksfake.Clientset
// Objects from here preload into NewSimpleFake.
storageObjects []runtime.Object // include StorageClass
snapshotClassObjects []runtime.Object

View File

@@ -19,7 +19,6 @@ package controller
import (
"fmt"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -28,9 +27,8 @@ import (
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
controllerName = "base-controler-test"
alwaysReady = func() bool { return true }
controllerName = "base-controler-test"
)
type fixture struct {
@@ -48,7 +46,6 @@ func (in *fakeObj) DeepCopyInto(out *fakeObj) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalRole.

View File

@@ -227,6 +227,7 @@ func (v *VirtualServiceController) syncService(key string) error {
}
// default component name to service name
//nolint:ineffassign,staticcheck
appName := name
defer func() {
@@ -322,7 +323,6 @@ func (v *VirtualServiceController) syncService(key string) error {
switch strategies[0].Spec.StrategyPolicy {
case servicemeshv1alpha2.PolicyPause:
vs.Spec = v.generateDefaultVirtualServiceSpec(name, subsets, service).Spec
break
case servicemeshv1alpha2.PolicyWaitForWorkloadReady:
set := v.getSubsets(strategies[0])

View File

@@ -189,43 +189,6 @@ func newDestinationRule(name string, host string, labels map[string]string, subs
return &dr
}
func newStrategy(name string, service *v1.Service, principalVersion string) *v1alpha2.Strategy {
st := v1alpha2.Strategy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: service.Namespace,
Labels: NewLabels().WithApp(""),
Annotations: nil,
},
Spec: v1alpha2.StrategySpec{
Type: v1alpha2.CanaryType,
PrincipalVersion: principalVersion,
GovernorVersion: "",
Template: v1alpha2.VirtualServiceTemplateSpec{
ObjectMeta: metav1.ObjectMeta{},
Spec: apiv1alpha3.VirtualService{
Hosts: []string{service.Name},
Http: []*apiv1alpha3.HTTPRoute{
{
Route: []*apiv1alpha3.HTTPRouteDestination{
{
Destination: &apiv1alpha3.Destination{
Host: service.Name,
Subset: "",
},
},
},
},
},
},
},
StrategyPolicy: v1alpha2.PolicyImmediately,
},
}
return &st
}
func toHost(service *v1.Service) string {
return fmt.Sprintf("%s.%s.svc", service.Name, service.Namespace)
}

View File

@@ -18,7 +18,6 @@ package workspacerolebinding
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
@@ -55,7 +54,7 @@ var _ = Describe("WorkspaceRoleBinding", func() {
It("Should create successfully", func() {
workspaceAdminBinding := &iamv1alpha2.WorkspaceRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("admin-workspace1-admin"),
Name: "admin-workspace1-admin",
Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: workspace.Name},
},
RoleRef: rbacv1.RoleRef{

View File

@@ -46,6 +46,7 @@ var _ = Describe("WorkspaceTemplate", func() {
BeforeEach(func() {
reconciler = &Reconciler{
//nolint:staticcheck
Client: fake.NewFakeClientWithScheme(scheme.Scheme),
Logger: ctrl.Log.WithName("controllers").WithName("acrpullbinding-controller"),
Recorder: record.NewFakeRecorder(5),