diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index cda20cca9..e5df4fa0d 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -31,30 +31,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/kubefed/pkg/controller/util" + iamv1alpha2 "kubesphere.io/api/iam/v1alpha2" + "kubesphere.io/kubesphere/cmd/controller-manager/app/options" "kubesphere.io/kubesphere/pkg/controller/alerting" "kubesphere.io/kubesphere/pkg/controller/application" - "kubesphere.io/kubesphere/pkg/controller/helm" - "kubesphere.io/kubesphere/pkg/controller/namespace" - "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmapplication" - "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmcategory" - "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmrelease" - "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmrepo" - "kubesphere.io/kubesphere/pkg/controller/quota" - "kubesphere.io/kubesphere/pkg/controller/serviceaccount" - "kubesphere.io/kubesphere/pkg/controller/user" - "kubesphere.io/kubesphere/pkg/controller/workspace" - "kubesphere.io/kubesphere/pkg/controller/workspacerole" - "kubesphere.io/kubesphere/pkg/controller/workspacerolebinding" - "kubesphere.io/kubesphere/pkg/controller/workspacetemplate" - "kubesphere.io/kubesphere/pkg/models/kubeconfig" - "kubesphere.io/kubesphere/pkg/simple/client/devops" - "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" - ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" - "kubesphere.io/kubesphere/pkg/simple/client/s3" - - iamv1alpha2 "kubesphere.io/api/iam/v1alpha2" - "kubesphere.io/kubesphere/pkg/controller/certificatesigningrequest" "kubesphere.io/kubesphere/pkg/controller/cluster" "kubesphere.io/kubesphere/pkg/controller/clusterrolebinding" @@ -63,17 +44,35 @@ import ( "kubesphere.io/kubesphere/pkg/controller/globalrolebinding" "kubesphere.io/kubesphere/pkg/controller/group" 
"kubesphere.io/kubesphere/pkg/controller/groupbinding" + "kubesphere.io/kubesphere/pkg/controller/helm" "kubesphere.io/kubesphere/pkg/controller/job" "kubesphere.io/kubesphere/pkg/controller/loginrecord" + "kubesphere.io/kubesphere/pkg/controller/namespace" "kubesphere.io/kubesphere/pkg/controller/network/ippool" "kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy" "kubesphere.io/kubesphere/pkg/controller/network/nsnetworkpolicy/provider" "kubesphere.io/kubesphere/pkg/controller/notification" + "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmapplication" + "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmcategory" + "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmrelease" + "kubesphere.io/kubesphere/pkg/controller/openpitrix/helmrepo" + "kubesphere.io/kubesphere/pkg/controller/quota" + "kubesphere.io/kubesphere/pkg/controller/serviceaccount" "kubesphere.io/kubesphere/pkg/controller/storage/capability" + "kubesphere.io/kubesphere/pkg/controller/user" "kubesphere.io/kubesphere/pkg/controller/virtualservice" + "kubesphere.io/kubesphere/pkg/controller/workspace" + "kubesphere.io/kubesphere/pkg/controller/workspacerole" + "kubesphere.io/kubesphere/pkg/controller/workspacerolebinding" + "kubesphere.io/kubesphere/pkg/controller/workspacetemplate" "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models/kubeconfig" + "kubesphere.io/kubesphere/pkg/simple/client/devops" + "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" "kubesphere.io/kubesphere/pkg/simple/client/k8s" + ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" ippoolclient "kubesphere.io/kubesphere/pkg/simple/client/network/ippool" + "kubesphere.io/kubesphere/pkg/simple/client/s3" ) var allControllers = []string{ @@ -563,7 +562,7 @@ func addAllControllers(mgr manager.Manager, client k8s.Client, informerFactory i return nil } -var addSuccessfullyControllers = sets.NewString() +var addSuccessfullyControllers = sets.New[string]() type 
setupableController interface { SetupWithManager(mgr ctrl.Manager) error diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index 6fe6e919b..8298d4eb1 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -22,26 +22,21 @@ import ( "strings" "time" - "kubesphere.io/kubesphere/pkg/simple/client/alerting" - "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" - - controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config" - - "k8s.io/apimachinery/pkg/util/sets" - - "kubesphere.io/kubesphere/pkg/apiserver/authentication" - - "k8s.io/apimachinery/pkg/labels" - "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/leaderelection" cliflag "k8s.io/component-base/cli/flag" "k8s.io/klog/v2" + "kubesphere.io/kubesphere/pkg/apiserver/authentication" + controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config" + "kubesphere.io/kubesphere/pkg/simple/client/alerting" "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" "kubesphere.io/kubesphere/pkg/simple/client/gateway" "kubesphere.io/kubesphere/pkg/simple/client/k8s" ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" "kubesphere.io/kubesphere/pkg/simple/client/multicluster" "kubesphere.io/kubesphere/pkg/simple/client/network" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" @@ -186,7 +181,7 @@ func (o *KubeSphereControllerManagerOptions) Validate(allControllerNameSelectors } // genetic option: controllers, check all selectors are valid - allControllersNameSet := sets.NewString(allControllerNameSelectors...) + allControllersNameSet := sets.New(allControllerNameSelectors...) 
for _, selector := range o.ControllerGates { if selector == "*" { continue diff --git a/kube/pkg/apis/core/v1/helper/qos/qos.go b/kube/pkg/apis/core/v1/helper/qos/qos.go index 3779f7d5a..d932b17ca 100644 --- a/kube/pkg/apis/core/v1/helper/qos/qos.go +++ b/kube/pkg/apis/core/v1/helper/qos/qos.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory)) +var supportedQoSComputeResources = sets.New(string(corev1.ResourceCPU), string(corev1.ResourceMemory)) // QOSList is a set of (resource name, QoS class) pairs. type QOSList map[corev1.ResourceName]corev1.PodQOSClass @@ -62,7 +62,7 @@ func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass { } } // process limits - qosLimitsFound := sets.NewString() + qosLimitsFound := sets.New[string]() for name, quantity := range container.Resources.Limits { if !isSupportedQoSComputeResource(name) { continue diff --git a/kube/pkg/quota/v1/evaluator/core/pods.go b/kube/pkg/quota/v1/evaluator/core/pods.go index 284ca8061..9a4acaf63 100644 --- a/kube/pkg/quota/v1/evaluator/core/pods.go +++ b/kube/pkg/quota/v1/evaluator/core/pods.go @@ -33,7 +33,7 @@ import ( "kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper" "kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper/qos" - quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1" "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" ) @@ -86,7 +86,7 @@ func isExtendedResourceNameForQuota(name corev1.ResourceName) bool { // the incoming pod is required to have those values set. we should not repeat // this mistake for other future resources (gpus, ephemeral-storage,etc). // do not add more resources to this list! 
-var validationSet = sets.NewString( +var validationSet = sets.New( string(corev1.ResourceCPU), string(corev1.ResourceMemory), string(corev1.ResourceRequestsCPU), @@ -123,7 +123,7 @@ func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime. // validation with resource counting, but we did this before QoS was even defined. // let's not make that mistake again with other resources now that QoS is defined. requiredSet := quota.ToSet(required).Intersection(validationSet) - missingSet := sets.NewString() + missingSet := sets.New[string]() for i := range pod.Spec.Containers { enforcePodContainerConstraints(&pod.Spec.Containers[i], requiredSet, missingSet) } @@ -133,7 +133,7 @@ func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime. if len(missingSet) == 0 { return nil } - return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ",")) + return fmt.Errorf("must specify %s", strings.Join(missingSet.UnsortedList(), ",")) } // GroupResource that this evaluator tracks @@ -220,14 +220,14 @@ var _ quota.Evaluator = &podEvaluator{} // enforcePodContainerConstraints checks for required resources that are not set on this container and // adds them to missingSet. -func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.String) { +func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.Set[string]) { requests := container.Resources.Requests limits := container.Resources.Limits containerUsage := podComputeUsageHelper(requests, limits) containerSet := quota.ToSet(quota.ResourceNames(containerUsage)) if !containerSet.Equal(requiredSet) { difference := requiredSet.Difference(containerSet) - missingSet.Insert(difference.List()...) + missingSet.Insert(difference.UnsortedList()...) 
} } diff --git a/kube/pkg/quota/v1/resources.go b/kube/pkg/quota/v1/resources.go index 9b217489d..78600b721 100644 --- a/kube/pkg/quota/v1/resources.go +++ b/kube/pkg/quota/v1/resources.go @@ -239,8 +239,8 @@ func IsNegative(a corev1.ResourceList) []corev1.ResourceName { } // ToSet takes a list of resource names and converts to a string set -func ToSet(resourceNames []corev1.ResourceName) sets.String { - result := sets.NewString() +func ToSet(resourceNames []corev1.ResourceName) sets.Set[string] { + result := sets.New[string]() for _, resourceName := range resourceNames { result.Insert(string(resourceName)) } diff --git a/kube/plugin/pkg/admission/resourcequota/controller.go b/kube/plugin/pkg/admission/resourcequota/controller.go index a77bce1b9..ff373cc16 100644 --- a/kube/plugin/pkg/admission/resourcequota/controller.go +++ b/kube/plugin/pkg/admission/resourcequota/controller.go @@ -36,7 +36,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/workqueue" - quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1" "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -66,7 +66,7 @@ type quotaEvaluator struct { workLock sync.Mutex work map[string][]*admissionWaiter dirtyWork map[string][]*admissionWaiter - inProgress sets.String + inProgress sets.Set[string] // controls the run method so that we can cleanly conform to the Evaluator interface workers int @@ -126,7 +126,7 @@ func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema. 
queue: workqueue.NewNamed("admission_quota_controller"), work: map[string][]*admissionWaiter{}, dirtyWork: map[string][]*admissionWaiter{}, - inProgress: sets.String{}, + inProgress: sets.New[string](), workers: workers, stopCh: stopCh, @@ -432,7 +432,7 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat // track the cumulative set of resources that were required across all quotas // this is needed to know if we have satisfied any constraints where consumption // was limited by default. - restrictedResourcesSet := sets.String{} + restrictedResourcesSet := sets.New[string]() restrictedScopes := []corev1.ScopedResourceSelectorRequirement{} for i := range quotas { resourceQuota := quotas[i] @@ -462,7 +462,7 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat } interestingQuotaIndexes = append(interestingQuotaIndexes, i) localRestrictedResourcesSet := quota.ToSet(restrictedResources) - restrictedResourcesSet.Insert(localRestrictedResourcesSet.List()...) + restrictedResourcesSet.Insert(localRestrictedResourcesSet.UnsortedList()...) } // Usage of some resources cannot be counted in isolation. For example, when @@ -516,7 +516,7 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat // if not, we reject the request. 
hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet) if len(hasNoCoveringQuota) > 0 { - return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ","))) + return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.UnsortedList(), ","))) } // verify that for every scope that had limited access enabled diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 0c3b8d380..7836dfc4a 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -309,8 +309,8 @@ func (s *APIServer) Run(ctx context.Context) (err error) { func (s *APIServer) buildHandlerChain(stopCh <-chan struct{}) { requestInfoResolver := &request.RequestInfoFactory{ - APIPrefixes: sets.NewString("api", "apis", "kapis", "kapi"), - GrouplessAPIPrefixes: sets.NewString("api", "kapi"), + APIPrefixes: sets.New("api", "apis", "kapis", "kapi"), + GrouplessAPIPrefixes: sets.New("api", "kapi"), GlobalResources: []schema.GroupResource{ iamv1alpha2.Resource(iamv1alpha2.ResourcesPluralUser), iamv1alpha2.Resource(iamv1alpha2.ResourcesPluralGlobalRole), diff --git a/pkg/apiserver/authorization/path/path.go b/pkg/apiserver/authorization/path/path.go index 3b942ad5a..502683c8e 100644 --- a/pkg/apiserver/authorization/path/path.go +++ b/pkg/apiserver/authorization/path/path.go @@ -29,7 +29,7 @@ import ( // Each path is either a fully matching path or it ends in * in case a prefix match is done. A leading / is optional. 
func NewAuthorizer(alwaysAllowPaths []string) (authorizer.Authorizer, error) { var prefixes []string - paths := sets.NewString() + paths := sets.New[string]() for _, p := range alwaysAllowPaths { p = strings.TrimPrefix(p, "/") if len(p) == 0 { diff --git a/pkg/apiserver/request/requestinfo.go b/pkg/apiserver/request/requestinfo.go index 3c2976542..f7b10c78b 100644 --- a/pkg/apiserver/request/requestinfo.go +++ b/pkg/apiserver/request/requestinfo.go @@ -25,8 +25,6 @@ import ( "net/http" "strings" - "kubesphere.io/kubesphere/pkg/utils/iputil" - "k8s.io/apimachinery/pkg/api/validation/path" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" @@ -38,6 +36,7 @@ import ( "kubesphere.io/kubesphere/pkg/api" "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/utils/iputil" ) type RequestInfoResolver interface { @@ -47,16 +46,16 @@ type RequestInfoResolver interface { // specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal // CRUDdy GET/POST/PUT/DELETE actions on REST objects. // master's Mux. 
-var specialVerbs = sets.NewString("proxy", "watch") +var specialVerbs = sets.New("proxy", "watch") // specialVerbsNoSubresources contains root verbs which do not allow subresources -var specialVerbsNoSubresources = sets.NewString("proxy") +var specialVerbsNoSubresources = sets.New("proxy") // namespaceSubresources contains subresources of namespace // this list allows the parser to distinguish between a namespace subresource, and a namespaced resource -var namespaceSubresources = sets.NewString("status", "finalize") +var namespaceSubresources = sets.New("status", "finalize") -var kubernetesAPIPrefixes = sets.NewString("api", "apis") +var kubernetesAPIPrefixes = sets.New("api", "apis") // RequestInfo holds information parsed from the http.Request, // extended from k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -86,8 +85,8 @@ type RequestInfo struct { } type RequestInfoFactory struct { - APIPrefixes sets.String - GrouplessAPIPrefixes sets.String + APIPrefixes sets.Set[string] + GrouplessAPIPrefixes sets.Set[string] GlobalResources []schema.GroupResource } diff --git a/pkg/apiserver/request/requestinfo_test.go b/pkg/apiserver/request/requestinfo_test.go index ca5fdf8c4..9213dc1c3 100644 --- a/pkg/apiserver/request/requestinfo_test.go +++ b/pkg/apiserver/request/requestinfo_test.go @@ -25,8 +25,8 @@ import ( func newTestRequestInfoResolver() RequestInfoResolver { requestInfoResolver := &RequestInfoFactory{ - APIPrefixes: sets.NewString("api", "apis", "kapis", "kapi"), - GrouplessAPIPrefixes: sets.NewString("api", "kapi"), + APIPrefixes: sets.New("api", "apis", "kapis", "kapi"), + GrouplessAPIPrefixes: sets.New("api", "kapi"), } return requestInfoResolver diff --git a/pkg/controller/cluster/cluster_controller.go b/pkg/controller/cluster/cluster_controller.go index 86983b232..284183141 100644 --- a/pkg/controller/cluster/cluster_controller.go +++ b/pkg/controller/cluster/cluster_controller.go @@ -330,7 +330,7 @@ func (c *clusterController) syncCluster(key 
string) error { // The object is not being deleted, so if it does not have our finalizer, // then lets add the finalizer and update the object. This is equivalent // registering our finalizer. - if !sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { + if !sets.New(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1alpha1.Finalizer) if cluster, err = c.ksClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil { return err @@ -338,7 +338,7 @@ func (c *clusterController) syncCluster(key string) error { } } else { // The object is being deleted - if sets.NewString(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { + if sets.New(cluster.ObjectMeta.Finalizers...).Has(clusterv1alpha1.Finalizer) { // need to unJoin federation first, before there are // some cleanup work to do in member cluster which depends // agent to proxy traffic @@ -361,9 +361,9 @@ func (c *clusterController) syncCluster(key string) error { } // remove our cluster finalizer - finalizers := sets.NewString(cluster.ObjectMeta.Finalizers...) + finalizers := sets.New(cluster.ObjectMeta.Finalizers...) 
finalizers.Delete(clusterv1alpha1.Finalizer) - cluster.ObjectMeta.Finalizers = finalizers.List() + cluster.ObjectMeta.Finalizers = sets.List(finalizers) if _, err = c.ksClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}); err != nil { return err } @@ -817,7 +817,7 @@ func (c *clusterController) syncClusterMembers(clusterClient *kubernetes.Clients return fmt.Errorf("failed to list users: %s", err) } - grantedUsers := sets.NewString() + grantedUsers := sets.New[string]() clusterName := cluster.Name if cluster.DeletionTimestamp.IsZero() { list, err := clusterClient.RbacV1().ClusterRoleBindings().List(context.Background(), @@ -837,18 +837,18 @@ for _, user := range users { user = user.DeepCopy() grantedClustersAnnotation := user.Annotations[iamv1alpha2.GrantedClustersAnnotation] - var grantedClusters sets.String + var grantedClusters sets.Set[string] if len(grantedClustersAnnotation) > 0 { - grantedClusters = sets.NewString(strings.Split(grantedClustersAnnotation, ",")...) + grantedClusters = sets.New(strings.Split(grantedClustersAnnotation, ",")...) 
} else { - grantedClusters = sets.NewString() + grantedClusters = sets.New[string]() } if grantedUsers.Has(user.Name) && !grantedClusters.Has(clusterName) { grantedClusters.Insert(clusterName) } else if !grantedUsers.Has(user.Name) && grantedClusters.Has(clusterName) { grantedClusters.Delete(clusterName) } - grantedClustersAnnotation = strings.Join(grantedClusters.List(), ",") + grantedClustersAnnotation = strings.Join(sets.List(grantedClusters), ",") if user.Annotations[iamv1alpha2.GrantedClustersAnnotation] != grantedClustersAnnotation { if user.Annotations == nil { user.Annotations = make(map[string]string, 0) diff --git a/pkg/controller/destinationrule/destinationrule_controller.go b/pkg/controller/destinationrule/destinationrule_controller.go index fdf3fabd2..f41e04e26 100644 --- a/pkg/controller/destinationrule/destinationrule_controller.go +++ b/pkg/controller/destinationrule/destinationrule_controller.go @@ -20,13 +20,13 @@ import ( "context" "fmt" "reflect" - - "kubesphere.io/kubesphere/pkg/controller/utils/servicemesh" - "time" apinetworkingv1alpha3 "istio.io/api/networking/v1alpha3" clientgonetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + istioclientset "istio.io/client-go/pkg/clientset/versioned" + istioinformers "istio.io/client-go/pkg/informers/externalversions/networking/v1alpha3" + istiolisters "istio.io/client-go/pkg/listers/networking/v1alpha3" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -36,27 +36,24 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes/scheme" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/klog/v2" - - servicemeshv1alpha2 "kubesphere.io/api/servicemesh/v1alpha2" - - istioclientset "istio.io/client-go/pkg/clientset/versioned" - istioinformers "istio.io/client-go/pkg/informers/externalversions/networking/v1alpha3" - istiolisters 
"istio.io/client-go/pkg/listers/networking/v1alpha3" informersv1 "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" listersv1 "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + servicemeshv1alpha2 "kubesphere.io/api/servicemesh/v1alpha2" servicemeshclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned" servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2" servicemeshlisters "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2" + "kubesphere.io/kubesphere/pkg/controller/utils/servicemesh" ) const ( @@ -426,8 +423,8 @@ func (v *DestinationRuleController) deleteDeployment(obj interface{}) { v.addDeployment(deploy) } -func (v *DestinationRuleController) getDeploymentServiceMemberShip(deployment *appsv1.Deployment) (sets.String, error) { - set := sets.String{} +func (v *DestinationRuleController) getDeploymentServiceMemberShip(deployment *appsv1.Deployment) (sets.Set[string], error) { + set := sets.New[string]() allServices, err := v.serviceLister.Services(deployment.Namespace).List(labels.Everything()) if err != nil { @@ -467,7 +464,7 @@ func (v *DestinationRuleController) addServicePolicy(obj interface{}) { return } - set := sets.String{} + set := sets.New[string]() for _, service := range services { key, err := cache.MetaNamespaceKeyFunc(service) if err != nil { diff --git a/pkg/controller/virtualservice/virtualservice_controller.go b/pkg/controller/virtualservice/virtualservice_controller.go index cd6e91f36..e955cfb09 100644 --- a/pkg/controller/virtualservice/virtualservice_controller.go +++ b/pkg/controller/virtualservice/virtualservice_controller.go @@ -20,6 +20,7 @@ import 
( "context" "fmt" "reflect" + "time" apinetworkingv1alpha3 "istio.io/api/networking/v1alpha3" clientgonetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" @@ -50,8 +51,6 @@ import ( servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2" servicemeshlisters "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2" "kubesphere.io/kubesphere/pkg/controller/utils/servicemesh" - - "time" ) const ( @@ -328,7 +327,7 @@ func (v *VirtualServiceController) syncService(key string) error { case servicemeshv1alpha2.PolicyWaitForWorkloadReady: set := v.getSubsets(strategies[0]) - setNames := sets.String{} + setNames := sets.New[string]() for i := range subsets { setNames.Insert(subsets[i].Name) } @@ -441,7 +440,7 @@ func (v *VirtualServiceController) addStrategy(obj interface{}) { } // avoid insert a key multiple times - set := sets.String{} + set := sets.New[string]() for i := range allServices { service := allServices[i] @@ -480,8 +479,8 @@ func (v *VirtualServiceController) handleErr(err error, key interface{}) { utilruntime.HandleError(err) } -func (v *VirtualServiceController) getSubsets(strategy *servicemeshv1alpha2.Strategy) sets.String { - set := sets.String{} +func (v *VirtualServiceController) getSubsets(strategy *servicemeshv1alpha2.Strategy) sets.Set[string] { + set := sets.New[string]() for _, httpRoute := range strategy.Spec.Template.Spec.Http { for _, dw := range httpRoute.Route { diff --git a/pkg/models/tenant/tenant.go b/pkg/models/tenant/tenant.go index 3582d3a5c..56b34d481 100644 --- a/pkg/models/tenant/tenant.go +++ b/pkg/models/tenant/tenant.go @@ -478,7 +478,7 @@ func (t *tenantOperator) PatchNamespace(workspace string, namespace *corev1.Name func (t *tenantOperator) PatchWorkspaceTemplate(user user.Info, workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) { var manageWorkspaceTemplateRequest bool - clusterNames := sets.NewString() + clusterNames := 
sets.New[string]() patchs, err := jsonpatchutil.Parse(data) if err != nil { @@ -542,7 +542,7 @@ func (t *tenantOperator) PatchWorkspaceTemplate(user user.Info, workspace string } if clusterNames.Len() > 0 { - err := t.checkClusterPermission(user, clusterNames.List()) + err := t.checkClusterPermission(user, clusterNames.UnsortedList()) if err != nil { klog.Error(err) return nil, err @@ -661,14 +661,14 @@ func (t *tenantOperator) ListClusters(user user.Info, queryParam *query.Query) ( } grantedClustersAnnotation := userDetail.Annotations[iamv1alpha2.GrantedClustersAnnotation] - var grantedClusters sets.String + var grantedClusters sets.Set[string] if len(grantedClustersAnnotation) > 0 { - grantedClusters = sets.NewString(strings.Split(grantedClustersAnnotation, ",")...) + grantedClusters = sets.New(strings.Split(grantedClustersAnnotation, ",")...) } else { - grantedClusters = sets.NewString() + grantedClusters = sets.New[string]() } var clusters []*clusterv1alpha1.Cluster - for _, grantedCluster := range grantedClusters.List() { + for _, grantedCluster := range sets.List(grantedClusters) { obj, err := t.resourceGetter.Get(clusterv1alpha1.ResourcesPluralCluster, "", grantedCluster) if err != nil { if errors.IsNotFound(err) { diff --git a/pkg/server/healthz/healthz.go b/pkg/server/healthz/healthz.go index fa8d85346..5256c7026 100644 --- a/pkg/server/healthz/healthz.go +++ b/pkg/server/healthz/healthz.go @@ -86,9 +86,9 @@ func handleRootHealth(name string, firstTimeHealthy func(), checks ...HealthChec } } if excluded.Len() > 0 { - fmt.Fprintf(&individualCheckOutput, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(excluded.List()...)) + fmt.Fprintf(&individualCheckOutput, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(excluded.UnsortedList()...)) klog.Warningf("cannot exclude some health checks, no health checks are installed matching %s", - 
formatQuoted(excluded.UnsortedList()...)) } // always be verbose on failure if len(failedChecks) > 0 { @@ -137,12 +137,12 @@ type HealthChecker interface { } // getExcludedChecks extracts the health check names to be excluded from the query param -func getExcludedChecks(r *http.Request) sets.String { +func getExcludedChecks(r *http.Request) sets.Set[string] { checks, found := r.URL.Query()["exclude"] if found { - return sets.NewString(checks...) + return sets.New(checks...) } - return sets.NewString() + return sets.New[string]() } // PingHealthz returns true automatically when checked diff --git a/pkg/simple/client/cache/inmemory_cache_test.go b/pkg/simple/client/cache/inmemory_cache_test.go index 0a268a674..f454dfc71 100644 --- a/pkg/simple/client/cache/inmemory_cache_test.go +++ b/pkg/simple/client/cache/inmemory_cache_test.go @@ -66,8 +66,8 @@ func dump(client Interface) (map[string]string, error) { func TestDeleteAndExpireCache(t *testing.T) { var testCases = []struct { description string - deleteKeys sets.String - expireKeys sets.String + deleteKeys sets.Set[string] + expireKeys sets.Set[string] expireDuration time.Duration // never use a 0(NeverExpires) duration with expireKeys, recommend time.Millisecond * 500. expected map[string]string }{ @@ -88,7 +88,7 @@ func TestDeleteAndExpireCache(t *testing.T) { "foo2": "val2", "foo3": "val3", }, - deleteKeys: sets.NewString("bar1", "bar2"), + deleteKeys: sets.New("bar1", "bar2"), }, { description: "Should get only keys start with bar", @@ -97,7 +97,7 @@ func TestDeleteAndExpireCache(t *testing.T) { "bar2": "val2", }, expireDuration: time.Millisecond * 500, - expireKeys: sets.NewString("foo1", "foo2", "foo3"), + expireKeys: sets.New("foo1", "foo2", "foo3"), }, } @@ -111,14 +111,14 @@ func TestDeleteAndExpireCache(t *testing.T) { } if len(testCase.deleteKeys) != 0 { - err = cacheClient.Del(testCase.deleteKeys.List()...) + err = cacheClient.Del(testCase.deleteKeys.UnsortedList()...) 
if err != nil { t.Fatalf("Error delete keys, %v", err) } } if len(testCase.expireKeys) != 0 && testCase.expireDuration != 0 { - for _, key := range testCase.expireKeys.List() { + for _, key := range testCase.expireKeys.UnsortedList() { err = cacheClient.Expire(key, testCase.expireDuration) if err != nil { t.Fatalf("Error expire keys, %v", err)