diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index e93c87065..2434dab33 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -18,6 +18,7 @@ package app import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "kubesphere.io/kubesphere/pkg/controller/application" "os" @@ -32,6 +33,7 @@ import ( controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config" "kubesphere.io/kubesphere/pkg/controller/namespace" "kubesphere.io/kubesphere/pkg/controller/network/webhooks" + "kubesphere.io/kubesphere/pkg/controller/quota" "kubesphere.io/kubesphere/pkg/controller/serviceaccount" "kubesphere.io/kubesphere/pkg/controller/user" "kubesphere.io/kubesphere/pkg/controller/workspace" @@ -194,29 +196,32 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) klog.Fatalf("unable add APIs to scheme: %v", err) } + // register common meta types into schemas. 
+ metav1.AddToGroupVersion(mgr.GetScheme(), metav1.SchemeGroupVersion) + workspaceTemplateReconciler := &workspacetemplate.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable} if err = workspaceTemplateReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create workspace template controller") + klog.Fatalf("Unable to create workspace template controller: %v", err) } workspaceReconciler := &workspace.Reconciler{} if err = workspaceReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create workspace controller") + klog.Fatalf("Unable to create workspace controller: %v", err) } workspaceRoleReconciler := &workspacerole.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable} if err = workspaceRoleReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create workspace role controller") + klog.Fatalf("Unable to create workspace role controller: %v", err) } workspaceRoleBindingReconciler := &workspacerolebinding.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable} if err = workspaceRoleBindingReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create workspace role binding controller") + klog.Fatalf("Unable to create workspace role binding controller: %v", err) } namespaceReconciler := &namespace.Reconciler{} if err = namespaceReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create namespace controller") + klog.Fatalf("Unable to create namespace controller: %v", err) } selector, _ := labels.Parse(s.ApplicationSelector) @@ -227,13 +232,17 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) ApplicationSelector: selector, } if err = applicationReconciler.SetupWithManager(mgr); err != nil { - klog.Fatal("Unable to create application controller") + klog.Fatalf("Unable to create application controller: %v", err) } saReconciler := &serviceaccount.Reconciler{} - if err = saReconciler.SetupWithManager(mgr); err != nil { - 
klog.Fatal("Unable to create ServiceAccount controller") + klog.Fatalf("Unable to create ServiceAccount controller: %v", err) + } + + resourceQuotaReconciler := quota.Reconciler{} + if err := resourceQuotaReconciler.SetupWithManager(mgr, quota.DefaultMaxConcurrentReconciles, quota.DefaultResyncPeriod, informerFactory.KubernetesSharedInformerFactory()); err != nil { + klog.Fatalf("Unable to create ResourceQuota controller: %v", err) } // TODO(jeff): refactor config with CRD @@ -263,10 +272,16 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) hookServer := mgr.GetWebhookServer() klog.V(2).Info("registering webhooks to the webhook server") - hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}}) + hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}}) hookServer.Register("/validate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.ValidatingHandler{C: mgr.GetClient()}}) hookServer.Register("/mutate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.MutatingHandler{C: mgr.GetClient()}}) + resourceQuotaAdmission, err := quota.NewResourceQuotaAdmission(mgr.GetClient(), mgr.GetScheme()) + if err != nil { + klog.Fatalf("unable to create resource quota admission: %v", err) + } + hookServer.Register("/validate-quota-kubesphere-io-v1alpha2", &webhook.Admission{Handler: resourceQuotaAdmission}) + klog.V(2).Info("registering metrics to the webhook server") hookServer.Register("/metrics", metrics.Handler()) diff --git a/config/crds/quota.kubesphere.io_resourcequotas.yaml b/config/crds/quota.kubesphere.io_resourcequotas.yaml new file mode 100644 index 000000000..4b1ba01b6 --- /dev/null +++ b/config/crds/quota.kubesphere.io_resourcequotas.yaml @@ -0,0 +1,170 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: 
CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: resourcequotas.quota.kubesphere.io +spec: + group: quota.kubesphere.io + names: + categories: + - quota + kind: ResourceQuota + listKind: ResourceQuotaList + plural: resourcequotas + singular: resourcequota + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: WorkspaceResourceQuota sets aggregate quota restrictions enforced + per workspace + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: Spec defines the desired quota + properties: + quota: + description: Quota defines the desired quota + properties: + hard: + additionalProperties: + type: string + description: 'hard is the set of desired hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + scopeSelector: + description: scopeSelector is also a collection of filters like + scopes that must match each object tracked by a quota but expressed + using ScopeSelectorOperator in combination with possible values. 
+ For a resource to match, both scopes AND scopeSelector (if specified + in spec), must be matched. + properties: + matchExpressions: + description: A list of scope selector requirements by scope + of the resources. + items: + description: A scoped-resource selector requirement is a selector + that contains values, a scope name, and an operator that + relates the scope name and values. + properties: + operator: + description: Represents a scope's relationship to a set + of values. Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector applies + to. + type: string + values: + description: An array of string values. If the operator + is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - operator + - scopeName + type: object + type: array + type: object + scopes: + description: A collection of filters that must match each object + tracked by a quota. If not specified, the quota matches all objects. + items: + description: A ResourceQuotaScope defines a filter that must match + each object tracked by a quota + type: string + type: array + type: object + selector: + additionalProperties: + type: string + description: LabelSelector is used to select projects by label. + type: object + required: + - quota + - selector + type: object + status: + description: Status defines the actual enforced quota and its current usage + properties: + namespaces: + description: Namespaces slices the usage by project. + items: + description: ResourceQuotaStatusByNamespace gives status for a particular + project + properties: + hard: + additionalProperties: + type: string + description: 'Hard is the set of enforced hard limits for each + named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + namespace: + description: Namespace the project this status applies to + type: string + used: + additionalProperties: + type: string + description: Used is the current observed total usage of the resource + in the namespace. + type: object + required: + - namespace + type: object + type: array + total: + description: Total defines the actual enforced quota and its current + usage across all projects + properties: + hard: + additionalProperties: + type: string + description: 'Hard is the set of enforced hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + used: + additionalProperties: + type: string + description: Used is the current observed total usage of the resource + in the namespace. + type: object + type: object + required: + - namespaces + - total + type: object + required: + - spec + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/webhook/iam.yaml b/config/webhook/iam.yaml index 58eae0b52..3f72946ec 100644 --- a/config/webhook/iam.yaml +++ b/config/webhook/iam.yaml @@ -8,9 +8,9 @@ webhooks: clientConfig: caBundle: service: - name: webhook-service + name: ks-controller-manager namespace: kubesphere-system - path: /validate-email-iam-kubesphere-io-v1alpha2-user + path: /validate-email-iam-kubesphere-io-v1alpha2 failurePolicy: Fail name: vemail.iam.kubesphere.io rules: @@ -22,19 +22,4 @@ webhooks: - CREATE - UPDATE resources: - - users - ---- - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: kubesphere-system -spec: - ports: - - port: 443 - targetPort: 443 - selector: - app: ks-controller-manager - tier: backend \ No newline at end of file + - users \ No newline at end of file diff --git 
a/config/webhook/ks-controller-manager.yaml b/config/webhook/ks-controller-manager.yaml new file mode 100644 index 000000000..5cc157e62 --- /dev/null +++ b/config/webhook/ks-controller-manager.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: ks-controller-manager + namespace: kubesphere-system +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 8443 + selector: + app: ks-controller-manager + tier: backend \ No newline at end of file diff --git a/config/webhook/nsnp.yaml b/config/webhook/network.yaml similarity index 82% rename from config/webhook/nsnp.yaml rename to config/webhook/network.yaml index 48aaec0b9..aeb562f4c 100644 --- a/config/webhook/nsnp.yaml +++ b/config/webhook/network.yaml @@ -7,9 +7,9 @@ webhooks: - clientConfig: caBundle: service: - name: kubesphere-controller-manager-service + name: ks-controller-manager namespace: kubesphere-system - path: /validate-nsnp-kubesphere-io-v1alpha1-network + path: /validate-network-kubesphere-io-v1alpha1 failurePolicy: Fail name: validate.nsnp.kubesphere.io rules: diff --git a/config/webhook/quota.yaml b/config/webhook/quota.yaml new file mode 100644 index 000000000..51236c126 --- /dev/null +++ b/config/webhook/quota.yaml @@ -0,0 +1,30 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: resourcesquotas.quota.kubesphere.io +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: + service: + name: ks-controller-manager + namespace: kubesphere-system + path: /validate-quota-kubesphere-io-v1alpha2 + port: 443 + failurePolicy: Ignore + matchPolicy: Exact + name: resourcesquotas.quota.kubesphere.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + resources: + - pods + scope: '*' + sideEffects: None \ No newline at end of file diff --git a/go.mod b/go.mod index 5c3196d49..eb1afff07 100644 --- a/go.mod +++ b/go.mod @@ -43,6 +43,7 @@ require 
( github.com/google/go-cmp v0.5.0 github.com/google/uuid v1.1.1 github.com/gorilla/websocket v1.4.1 + github.com/hashicorp/golang-lru v0.5.4 github.com/json-iterator/go v1.1.10 github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/kr/text v0.2.0 // indirect @@ -733,7 +734,6 @@ replace ( gopkg.in/tchap/go-patricia.v2 => gopkg.in/tchap/go-patricia.v2 v2.2.6 gopkg.in/tomb.v1 => gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/warnings.v0 => gopkg.in/warnings.v0 v0.1.2 - gopkg.in/yaml.v1 => gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gotest.tools => gotest.tools v2.2.0+incompatible diff --git a/hack/generate_client.sh b/hack/generate_client.sh index 94ae37d3f..fced9d970 100755 --- a/hack/generate_client.sh +++ b/hack/generate_client.sh @@ -2,7 +2,7 @@ set -e -GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 tenant:v1alpha2 devops:v1alpha1 iam:v1alpha2 devops:v1alpha3 cluster:v1alpha1 storage:v1alpha1 auditing:v1alpha1 types:v1beta1" +GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 tenant:v1alpha2 devops:v1alpha1 iam:v1alpha2 devops:v1alpha3 cluster:v1alpha1 storage:v1alpha1 auditing:v1alpha1 types:v1beta1 quota:v1alpha2" rm -rf ./pkg/client ./hack/generate_group.sh "client,lister,informer" kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "$GV" --output-base=./ -h "$PWD/hack/boilerplate.go.txt" diff --git a/kube/pkg/api/v1/pod/util.go b/kube/pkg/api/v1/pod/util.go new file mode 100644 index 000000000..6825dfe29 --- /dev/null +++ b/kube/pkg/api/v1/pod/util.go @@ -0,0 +1,336 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "kubesphere.io/kubesphere/kube/pkg/features" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. +func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// ContainerVisitor is called with each container spec, and returns true +// if visiting should continue. +type ContainerVisitor func(container *v1.Container) (shouldContinue bool) + +// VisitContainers invokes the visitor function with a pointer to the container +// spec of every container in the given pod spec. If visitor returns false, +// visiting is short-circuited. VisitContainers returns true if visiting completes, +// false if visiting was short-circuited. 
+func VisitContainers(podSpec *v1.PodSpec, visitor ContainerVisitor) bool { + for i := range podSpec.InitContainers { + if !visitor(&podSpec.InitContainers[i]) { + return false + } + } + for i := range podSpec.Containers { + if !visitor(&podSpec.Containers[i]) { + return false + } + } + if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) { + for i := range podSpec.EphemeralContainers { + if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon)) { + return false + } + } + } + return true +} + +// Visitor is called with each object name, and returns true if visiting should continue +type Visitor func(name string) (shouldContinue bool) + +// VisitPodSecretNames invokes the visitor function with the name of every secret +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. 
+func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { + for _, reference := range pod.Spec.ImagePullSecrets { + if !visitor(reference.Name) { + return false + } + } + VisitContainers(&pod.Spec, func(c *v1.Container) bool { + return visitContainerSecretNames(c, visitor) + }) + var source *v1.VolumeSource + + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.AzureFile != nil: + if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { + return false + } + case source.CephFS != nil: + if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { + return false + } + case source.Cinder != nil: + if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { + return false + } + case source.FlexVolume != nil: + if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { + return false + } + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].Secret != nil { + if !visitor(source.Projected.Sources[j].Secret.Name) { + return false + } + } + } + case source.RBD != nil: + if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { + return false + } + case source.Secret != nil: + if !visitor(source.Secret.SecretName) { + return false + } + case source.ScaleIO != nil: + if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { + return false + } + case source.ISCSI != nil: + if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { + return false + } + case source.StorageOS != nil: + if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { + return false + } + case source.CSI != nil: + if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) { + return false + } + } + } + return true +} + +func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { 
+ for _, env := range container.EnvFrom { + if env.SecretRef != nil { + if !visitor(env.SecretRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { + if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { + return false + } + } + } + return true +} + +// VisitPodConfigmapNames invokes the visitor function with the name of every configmap +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. +func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { + VisitContainers(&pod.Spec, func(c *v1.Container) bool { + return visitContainerConfigmapNames(c, visitor) + }) + var source *v1.VolumeSource + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].ConfigMap != nil { + if !visitor(source.Projected.Sources[j].ConfigMap.Name) { + return false + } + } + } + case source.ConfigMap != nil: + if !visitor(source.ConfigMap.Name) { + return false + } + } + } + return true +} + +func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.ConfigMapRef != nil { + if !visitor(env.ConfigMapRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { + if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { + return false + } + } + } + return true +} + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It also returns if "name" exists. 
+func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// It also returns if "name" exists. +func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + status, _ := GetContainerStatus(statuses, name) + return status +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. 
+func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + return GetPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. +func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } + // We are updating an existing condition, so we need to check if it has changed. 
+ if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual +} + +// GetPodPriority returns priority of the given pod. +func GetPodPriority(pod *v1.Pod) int32 { + if pod.Spec.Priority != nil { + return *pod.Spec.Priority + } + // When priority of a running pod is nil, it means it was created at a time + // that there was no global default priority class and the priority class + // name of the pod was empty. So, we resolve to the static default priority. + return 0 +} diff --git a/kube/pkg/apis/core/helper/helpers.go b/kube/pkg/apis/core/helper/helpers.go new file mode 100644 index 000000000..0c16cb03b --- /dev/null +++ b/kube/pkg/apis/core/helper/helpers.go @@ -0,0 +1,51 @@ +/* + +Copyright 2021 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +*/ + +package helper + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for core objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) \ No newline at end of file diff --git a/kube/pkg/apis/core/v1/helper/helpers.go b/kube/pkg/apis/core/v1/helper/helpers.go new file mode 100644 index 000000000..02ccaec9b --- /dev/null +++ b/kube/pkg/apis/core/v1/helper/helpers.go @@ -0,0 +1,500 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/validation" + "kubesphere.io/kubesphere/kube/pkg/apis/core/helper" +) + +// IsExtendedResourceName returns true if: +// 1. the resource name is not in the default namespace; +// 2. resource name does not have "requests." prefix, +// to avoid confusion with the convention in quota +// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name +func IsExtendedResourceName(name v1.ResourceName) bool { + if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) { + return false + } + // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name + nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name)) + if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { + return false + } + return true +} + +// IsPrefixedNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. +func IsPrefixedNativeResource(name v1.ResourceName) bool { + return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix) +} + +// IsNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. +func IsNativeResource(name v1.ResourceName) bool { + return !strings.Contains(string(name), "/") || + IsPrefixedNativeResource(name) +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. 
+func IsHugePageResourceName(name v1.ResourceName) bool { + return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName { + return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and is not hugepages. +func IsOvercommitAllowed(name v1.ResourceName) bool { + return IsNativeResource(name) && + !IsHugePageResourceName(name) +} + +func IsAttachableVolumeResourceName(name v1.ResourceName) bool { + return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix) +} + +// Extended and Hugepages resources +func IsScalarResourceName(name v1.ResourceName) bool { + return IsExtendedResourceName(name) || IsHugePageResourceName(name) || + IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name) +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *v1.Service) bool { + return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" +} + +// TODO: make method on LoadBalancerStatus? 
+func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []v1.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, v1.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, v1.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, v1.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range 
modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case v1.NodeSelectorOpIn: + op = selection.In + case v1.NodeSelectorOpNotIn: + op = selection.NotIn + case v1.NodeSelectorOpExists: + op = selection.Exists + case v1.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case v1.NodeSelectorOpGt: + op = selection.GreaterThan + case v1.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements +// fields.Selector. 
+// NodeSelectorRequirementsAsFieldSelector converts node selector requirements
+// into a fields.Selector; only In/NotIn with exactly one value are valid for
+// node field selection.
+func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) {
+	if len(nsm) == 0 {
+		return fields.Nothing(), nil
+	}
+
+	selectors := []fields.Selector{}
+	for _, expr := range nsm {
+		switch expr.Operator {
+		case v1.NodeSelectorOpIn, v1.NodeSelectorOpNotIn:
+			// Both operators demand exactly one value for field selection.
+			if len(expr.Values) != 1 {
+				return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q",
+					len(expr.Values), expr.Operator)
+			}
+			if expr.Operator == v1.NodeSelectorOpIn {
+				selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0]))
+			} else {
+				selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0]))
+			}
+		default:
+			return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator)
+		}
+	}
+
+	return fields.AndSelectors(selectors...), nil
+}
+
+// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms
+func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
+	for _, requirement := range reqs {
+		for _, term := range terms {
+			for _, expr := range term.MatchExpressions {
+				if expr.Key == requirement.Key {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// MatchNodeSelectorTerms checks whether the node labels and fields match node selector terms in ORed;
+// nil or empty term matches no objects.
+// MatchNodeSelectorTerms checks whether the node labels and fields match node
+// selector terms, ORed together; a nil or empty term matches no objects.
+func MatchNodeSelectorTerms(
+	nodeSelectorTerms []v1.NodeSelectorTerm,
+	nodeLabels labels.Set,
+	nodeFields fields.Set,
+) bool {
+	for _, term := range nodeSelectorTerms {
+		// A term with neither expressions nor fields selects nothing.
+		if len(term.MatchExpressions) == 0 && len(term.MatchFields) == 0 {
+			continue
+		}
+
+		if len(term.MatchExpressions) > 0 {
+			labelSelector, err := NodeSelectorRequirementsAsSelector(term.MatchExpressions)
+			// A conversion failure is treated the same as a non-match.
+			if err != nil || !labelSelector.Matches(nodeLabels) {
+				continue
+			}
+		}
+
+		if len(term.MatchFields) > 0 {
+			fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(term.MatchFields)
+			if err != nil || !fieldSelector.Matches(nodeFields) {
+				continue
+			}
+		}
+
+		// Terms are ORed: the first term that matches wins.
+		return true
+	}
+
+	return false
+}
+
+// TopologySelectorRequirementsAsSelector converts topology selector label
+// requirements into a labels.Selector, using the In operator for every entry.
+func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
+	if len(tsm) == 0 {
+		return labels.Nothing(), nil
+	}
+
+	selector := labels.NewSelector()
+	for _, requirement := range tsm {
+		req, err := labels.NewRequirement(requirement.Key, selection.In, requirement.Values)
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*req)
+	}
+
+	return selector, nil
+}
+
+// MatchTopologySelectorTerms checks whether given labels match topology selector terms in ORed;
+// nil or empty term matches no objects; while empty term list matches all objects.
+func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool { + if len(topologySelectorTerms) == 0 { + // empty term list matches all objects + return true + } + + for _, req := range topologySelectorTerms { + // nil or empty term selects no objects + if len(req.MatchLabelExpressions) == 0 { + continue + } + + labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions) + if err != nil || !labelSelector.Matches(lbls) { + continue + } + + return true + } + + return false +} + +// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool { + podTolerations := spec.Tolerations + + var newTolerations []v1.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if helper.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + spec.Tolerations = newTolerations + return true +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { + return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. 
+func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*v1.Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates +// all the taints that apply to the filter in given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. +func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { + if len(taints) == 0 { + return true, []v1.Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []v1.Toleration{} + } + result := []v1.Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []v1.Toleration{} + } + } + return true, result +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { + var avoidPods v1.AvoidPods + if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// GetPersistentVolumeClass returns StorageClassName. 
+func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. +func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/kube/pkg/apis/core/v1/helper/qos/qos.go b/kube/pkg/apis/core/v1/helper/qos/qos.go new file mode 100644 index 000000000..3779f7d5a --- /dev/null +++ b/kube/pkg/apis/core/v1/helper/qos/qos.go @@ -0,0 +1,103 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+*/
+
+package qos
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// supportedQoSComputeResources lists the only compute resources that
+// participate in QoS classification: cpu and memory.
+var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))
+
+// QOSList is a set of (resource name, QoS class) pairs.
+type QOSList map[corev1.ResourceName]corev1.PodQOSClass
+
+// isSupportedQoSComputeResource reports whether name is one of the resources
+// considered for QoS classification (cpu or memory).
+func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
+	return supportedQoSComputeResources.Has(string(name))
+}
+
+// GetPodQOS returns the QoS class of a pod.
+// A pod is besteffort if none of its containers have specified any requests or limits.
+// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
+// A pod is burstable if limits and requests do not match across all containers.
+func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
+	requests := corev1.ResourceList{}
+	limits := corev1.ResourceList{}
+	zeroQuantity := resource.MustParse("0")
+	isGuaranteed := true
+	// Regular containers and init containers are classified together.
+	allContainers := []corev1.Container{}
+	allContainers = append(allContainers, pod.Spec.Containers...)
+	allContainers = append(allContainers, pod.Spec.InitContainers...)
+	for _, container := range allContainers {
+		// process requests
+		for name, quantity := range container.Resources.Requests {
+			if !isSupportedQoSComputeResource(name) {
+				continue
+			}
+			// Only strictly positive quantities are accumulated.
+			if quantity.Cmp(zeroQuantity) == 1 {
+				delta := quantity.DeepCopy()
+				if _, exists := requests[name]; !exists {
+					requests[name] = delta
+				} else {
+					delta.Add(requests[name])
+					requests[name] = delta
+				}
+			}
+		}
+		// process limits
+		qosLimitsFound := sets.NewString()
+		for name, quantity := range container.Resources.Limits {
+			if !isSupportedQoSComputeResource(name) {
+				continue
+			}
+			if quantity.Cmp(zeroQuantity) == 1 {
+				qosLimitsFound.Insert(string(name))
+				delta := quantity.DeepCopy()
+				if _, exists := limits[name]; !exists {
+					limits[name] = delta
+				} else {
+					delta.Add(limits[name])
+					limits[name] = delta
+				}
+			}
+		}
+
+		// Guaranteed requires every container to set limits for BOTH cpu
+		// and memory; one container missing either disqualifies the pod.
+		if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
+			isGuaranteed = false
+		}
+	}
+	if len(requests) == 0 && len(limits) == 0 {
+		return corev1.PodQOSBestEffort
+	}
+	// Check is requests match limits for all resources.
+	if isGuaranteed {
+		for name, req := range requests {
+			if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
+				isGuaranteed = false
+				break
+			}
+		}
+	}
+	if isGuaranteed &&
+		len(requests) == len(limits) {
+		return corev1.PodQOSGuaranteed
+	}
+	return corev1.PodQOSBurstable
+}
diff --git a/kube/pkg/features/features.go b/kube/pkg/features/features.go
new file mode 100644
index 000000000..cede7dd54
--- /dev/null
+++ b/kube/pkg/features/features.go
@@ -0,0 +1,680 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + "k8s.io/component-base/featuregate" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.X + // MyFeature featuregate.Feature = "MyFeature" + + // owner: @tallclair + // beta: v1.4 + AppArmor featuregate.Feature = "AppArmor" + + // owner: @mtaufen + // alpha: v1.4 + // beta: v1.11 + DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig" + + // owner: @pweil- + // alpha: v1.5 + // + // Default userns=host for containers that are using other host namespaces, host mounts, the pod + // contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE, + // SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon. + ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting" + + // owner: @jiayingz + // beta: v1.10 + // + // Enables support for Device Plugins + DevicePlugins featuregate.Feature = "DevicePlugins" + + // owner: @dxist + // alpha: v1.16 + // + // Enables support of HPA scaling to zero pods when an object or custom metric is configured. + HPAScaleToZero featuregate.Feature = "HPAScaleToZero" + + // owner: @mikedanese + // alpha: v1.7 + // beta: v1.12 + // + // Gets a server certificate for the kubelet from the Certificate Signing + // Request API instead of generating one self signed and auto rotates the + // certificate as expiration approaches. 
+ RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate" + + // owner: @jinxu + // beta: v1.10 + // + // New local storage types to support local storage capacity isolation + LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation" + + // owner: @gnufied + // beta: v1.11 + // Ability to Expand persistent volumes + ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes" + + // owner: @mlmhl + // beta: v1.15 + // Ability to expand persistent volumes' file system without unmounting volumes. + ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes" + + // owner: @gnufied + // alpha: v1.14 + // beta: v1.16 + // Ability to expand CSI volumes + ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes" + + // owner: @verb + // alpha: v1.16 + // + // Allows running an ephemeral container in pod namespaces to troubleshoot a running pod. + EphemeralContainers featuregate.Feature = "EphemeralContainers" + + // owner: @sjenning + // alpha: v1.11 + // + // Allows resource reservations at the QoS level preventing pods at lower QoS levels from + // bursting into resources requested at higher QoS levels (memory only for now) + QOSReserved featuregate.Feature = "QOSReserved" + + // owner: @ConnorDoyle + // alpha: v1.8 + // beta: v1.10 + // + // Alternative container-level CPU affinity policies. 
+ CPUManager featuregate.Feature = "CPUManager" + + // owner: @szuecs + // alpha: v1.12 + // + // Enable nodes to change CPUCFSQuotaPeriod + CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod" + + // owner: @lmdaly + // alpha: v1.16 + // beta: v1.18 + // + // Enable resource managers to make NUMA aligned decisions + TopologyManager featuregate.Feature = "TopologyManager" + + // owner: @sjenning + // beta: v1.11 + // + // Enable pods to set sysctls on a pod + Sysctls featuregate.Feature = "Sysctls" + + // owner @smarterclayton + // alpha: v1.16 + // beta: v1.19 + // ga: v1.21 + // + // Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18. + // Lock to false in v1.21 and remove in v1.22. + LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior" + + // owner @brendandburns + // alpha: v1.9 + // beta: v1.19 + // ga: v1.21 + // + // Enable nodes to exclude themselves from service load balancers + ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion" + + // owner @smarterclayton + // alpha: v1.16 + // beta: v1.19 + // ga: v1.21 + // + // Enable nodes to exclude themselves from network disruption checks + NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion" + + // owner: @saad-ali + // alpha: v1.12 + // beta: v1.14 + // GA: v1.18 + // Enable all logic related to the CSIDriver API object in storage.k8s.io + CSIDriverRegistry featuregate.Feature = "CSIDriverRegistry" + + // owner: @screeley44 + // alpha: v1.9 + // beta: v1.13 + // ga: v1.18 + // + // Enable Block volume support in containers. 
+ BlockVolume featuregate.Feature = "BlockVolume" + + // owner: @pospispa + // GA: v1.11 + // + // Postpone deletion of a PV or a PVC when they are being used + StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection" + + // owner: @dims, @derekwaynecarr + // alpha: v1.10 + // beta: v1.14 + // GA: v1.20 + // + // Implement support for limiting pids in pods + SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit" + + // owner: @mikedanese + // alpha: v1.13 + // + // Migrate ServiceAccount volumes to use a projected volume consisting of a + // ServiceAccountTokenVolumeProjection. This feature adds new required flags + // to the API server. + BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume" + + // owner: @mtaufen + // alpha: v1.18 + // beta: v1.20 + // + // Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service + // account issuer in the API server. + // Note these endpoints serve minimally-compliant discovery docs that are + // intended to be used for service account token verification. + ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery" + + // owner: @Random-Liu + // beta: v1.11 + // + // Enable container log rotation for cri container runtime + CRIContainerLogRotation featuregate.Feature = "CRIContainerLogRotation" + + // owner: @krmayankk + // beta: v1.14 + // + // Enables control over the primary group ID of containers' init processes. + RunAsGroup featuregate.Feature = "RunAsGroup" + + // owner: @saad-ali + // ga + // + // Allow mounting a subpath of a volume in a container + // Do not remove this feature gate even though it's GA + VolumeSubpath featuregate.Feature = "VolumeSubpath" + + // owner: @ravig + // alpha: v1.11 + // + // Include volume count on node to be considered for balanced resource allocation while scheduling. 
+ // A node which has closer cpu,memory utilization and volume count is favoured by scheduler + // while making decisions. + BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes" + + // owner: @vladimirvivien + // alpha: v1.11 + // beta: v1.14 + // ga: v1.18 + // + // Enables CSI to use raw block storage volumes + CSIBlockVolume featuregate.Feature = "CSIBlockVolume" + + // owner: @pohly + // alpha: v1.14 + // beta: v1.16 + // + // Enables CSI Inline volumes support for pods + CSIInlineVolume featuregate.Feature = "CSIInlineVolume" + + // owner: @pohly + // alpha: v1.19 + // + // Enables tracking of available storage capacity that CSI drivers provide. + CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity" + + // owner: @alculquicondor + // beta: v1.20 + // + // Enables the use of PodTopologySpread scheduling plugin to do default + // spreading and disables legacy SelectorSpread plugin. + DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread" + + // owner: @pohly + // alpha: v1.19 + // + // Enables generic ephemeral inline volume support for pods + GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume" + + // owner: @chendave + // alpha: v1.21 + // + // PreferNominatedNode tells scheduler whether the nominated node will be checked first before looping + // all the rest of nodes in the cluster. + // Enabling this feature also implies the preemptor pod might not be dispatched to the best candidate in + // some corner case, e.g. another node releases enough resources after the nominated node has been set + // and hence is the best candidate instead. + PreferNominatedNode featuregate.Feature = "PreferNominatedNode" + + // owner: @tallclair + // alpha: v1.12 + // beta: v1.14 + // GA: v1.20 + // + // Enables RuntimeClass, for selecting between multiple runtimes to run a pod. 
+ RuntimeClass featuregate.Feature = "RuntimeClass" + + // owner: @mtaufen + // alpha: v1.12 + // beta: v1.14 + // GA: v1.17 + // + // Kubelet uses the new Lease API to report node heartbeats, + // (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. + NodeLease featuregate.Feature = "NodeLease" + + // owner: @janosi + // alpha: v1.12 + // beta: v1.18 + // GA: v1.20 + // + // Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition + SCTPSupport featuregate.Feature = "SCTPSupport" + + // owner: @xing-yang + // alpha: v1.12 + // beta: v1.17 + // GA: v1.20 + // + // Enable volume snapshot data source support. + VolumeSnapshotDataSource featuregate.Feature = "VolumeSnapshotDataSource" + + // owner: @jessfraz + // alpha: v1.12 + // + // Enables control over ProcMountType for containers. + ProcMountType featuregate.Feature = "ProcMountType" + + // owner: @janetkuo + // alpha: v1.12 + // + // Allow TTL controller to clean up Pods and Jobs after they finish. + TTLAfterFinished featuregate.Feature = "TTLAfterFinished" + + // owner: @dashpole + // alpha: v1.13 + // beta: v1.15 + // + // Enables the kubelet's pod resources grpc endpoint + KubeletPodResources featuregate.Feature = "KubeletPodResources" + + // owner: @davidz627 + // alpha: v1.14 + // beta: v1.17 + // + // Enables the in-tree storage to CSI Plugin migration feature. + CSIMigration featuregate.Feature = "CSIMigration" + + // owner: @davidz627 + // alpha: v1.14 + // beta: v1.17 + // + // Enables the GCE PD in-tree driver to GCE CSI Driver migration feature. + CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE" + + // owner: @davidz627 + // alpha: v1.17 + // + // Disables the GCE PD in-tree driver. + // Expects GCE PD CSI Driver to be installed and configured on all nodes. 
+ CSIMigrationGCEComplete featuregate.Feature = "CSIMigrationGCEComplete" + + // owner: @leakingtapan + // alpha: v1.14 + // beta: v1.17 + // + // Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature. + CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS" + + // owner: @leakingtapan + // alpha: v1.17 + // + // Disables the AWS EBS in-tree driver. + // Expects AWS EBS CSI Driver to be installed and configured on all nodes. + CSIMigrationAWSComplete featuregate.Feature = "CSIMigrationAWSComplete" + + // owner: @andyzhangx + // alpha: v1.15 + // beta: v1.19 + // + // Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature. + CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk" + + // owner: @andyzhangx + // alpha: v1.17 + // + // Disables the Azure Disk in-tree driver. + // Expects Azure Disk CSI Driver to be installed and configured on all nodes. + CSIMigrationAzureDiskComplete featuregate.Feature = "CSIMigrationAzureDiskComplete" + + // owner: @andyzhangx + // alpha: v1.15 + // + // Enables the Azure File in-tree driver to Azure File Driver migration feature. + CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile" + + // owner: @andyzhangx + // alpha: v1.17 + // + // Disables the Azure File in-tree driver. + // Expects Azure File CSI Driver to be installed and configured on all nodes. + CSIMigrationAzureFileComplete featuregate.Feature = "CSIMigrationAzureFileComplete" + + // owner: @divyenpatel + // beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15) + // + // Enables the vSphere in-tree driver to vSphere CSI Driver migration feature. + CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere" + + // owner: @divyenpatel + // beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15) + // + // Disables the vSphere in-tree driver. + // Expects vSphere CSI Driver to be installed and configured on all nodes. 
+ CSIMigrationvSphereComplete featuregate.Feature = "CSIMigrationvSphereComplete" + + // owner: @huffmanca + // alpha: v1.19 + // beta: v1.20 + // + // Determines if a CSI Driver supports applying fsGroup. + CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy" + + // owner: @gnufied + // alpha: v1.18 + // beta: v1.20 + // Allows user to configure volume permission change policy for fsGroups when mounting + // a volume in a Pod. + ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy" + + // owner: @RobertKrawitz, @derekwaynecarr + // beta: v1.15 + // GA: v1.20 + // + // Implement support for limiting pids in nodes + SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit" + + // owner: @wk8 + // alpha: v1.14 + // beta: v1.16 + // + // Enables GMSA support for Windows workloads. + WindowsGMSA featuregate.Feature = "WindowsGMSA" + + // owner: @bclau + // alpha: v1.16 + // beta: v1.17 + // GA: v1.18 + // + // Enables support for running container entrypoints as different usernames than their default ones. + WindowsRunAsUserName featuregate.Feature = "WindowsRunAsUserName" + + // owner: @adisky + // alpha: v1.14 + // beta: v1.18 + // + // Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature. + CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack" + + // owner: @adisky + // alpha: v1.17 + // + // Disables the OpenStack Cinder in-tree driver. + // Expects the OpenStack Cinder CSI Driver to be installed and configured on all nodes. + CSIMigrationOpenStackComplete featuregate.Feature = "CSIMigrationOpenStackComplete" + + // owner: @RobertKrawitz + // alpha: v1.15 + // + // Allow use of filesystems for ephemeral storage monitoring. + // Only applies if LocalStorageCapacityIsolation is set. 
+ LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring" + + // owner: @denkensk + // alpha: v1.15 + // beta: v1.19 + // + // Enables NonPreempting option for priorityClass and pod. + NonPreemptingPriority featuregate.Feature = "NonPreemptingPriority" + + // owner: @egernst + // alpha: v1.16 + // beta: v1.18 + // + // Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass + PodOverhead featuregate.Feature = "PodOverhead" + + // owner: @khenidak + // alpha: v1.15 + // + // Enables ipv6 dual stack + IPv6DualStack featuregate.Feature = "IPv6DualStack" + + // owner: @robscott @freehan + // alpha: v1.16 + // + // Enable Endpoint Slices for more scalable Service endpoints. + EndpointSlice featuregate.Feature = "EndpointSlice" + + // owner: @robscott @freehan + // alpha: v1.18 + // beta: v1.19 + // + // Enable Endpoint Slice consumption by kube-proxy for improved scalability. + EndpointSliceProxying featuregate.Feature = "EndpointSliceProxying" + + // owner: @robscott @kumarvin123 + // alpha: v1.19 + // + // Enable Endpoint Slice consumption by kube-proxy in Windows for improved scalability. + WindowsEndpointSliceProxying featuregate.Feature = "WindowsEndpointSliceProxying" + + // owner: @matthyx + // alpha: v1.16 + // beta: v1.18 + // GA: v1.20 + // + // Enables the startupProbe in kubelet worker. 
+ StartupProbe featuregate.Feature = "StartupProbe" + + // owner: @deads2k + // beta: v1.17 + // + // Enables the users to skip TLS verification of kubelets on pod logs requests + AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy" + + // owner: @mortent + // alpha: v1.3 + // beta: v1.5 + // + // Enable all logic related to the PodDisruptionBudget API object in policy + PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget" + + // owner: @alaypatel07, @soltysh + // alpha: v1.20 + // beta: v1.21 + // + // CronJobControllerV2 controls whether the controller manager starts old cronjob + // controller or new one which is implemented with informers and delaying queue + // + // This feature is deprecated, and will be removed in v1.22. + CronJobControllerV2 featuregate.Feature = "CronJobControllerV2" + + // owner: @smarterclayton + // alpha: v1.21 + // + // DaemonSets allow workloads to maintain availability during update per node + DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge" + + // owner: @m1093782566 + // alpha: v1.17 + // + // Enables topology aware service routing + ServiceTopology featuregate.Feature = "ServiceTopology" + + // owner: @robscott + // alpha: v1.18 + // beta: v1.19 + // ga: v1.20 + // + // Enables AppProtocol field for Services and Endpoints. + ServiceAppProtocol featuregate.Feature = "ServiceAppProtocol" + + // owner: @wojtek-t + // alpha: v1.18 + // beta: v1.19 + // ga: v1.21 + // + // Enables a feature to make secrets and configmaps data immutable. + ImmutableEphemeralVolumes featuregate.Feature = "ImmutableEphemeralVolumes" + + // owner: @bart0sh + // alpha: v1.18 + // beta: v1.19 + // + // Enables usage of HugePages- in a volume medium, + // e.g. emptyDir: + // medium: HugePages-1Gi + HugePageStorageMediumSize featuregate.Feature = "HugePageStorageMediumSize" + + // owner: @derekwaynecarr + // alpha: v1.20 + // + // Enables usage of hugepages- in downward API. 
+ DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages" + + // owner: @freehan + // GA: v1.18 + // + // Enable ExternalTrafficPolicy for Service ExternalIPs. + // This is for bug fix #69811 + ExternalPolicyForExternalIP featuregate.Feature = "ExternalPolicyForExternalIP" + + // owner: @bswartz + // alpha: v1.18 + // + // Enables usage of any object for volume data source in PVCs + AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource" + + // owner: @javidiaz + // alpha: v1.19 + // beta: v1.20 + // + // Allow setting the Fully Qualified Domain Name (FQDN) in the hostname of a Pod. If a Pod does not + // have FQDN, this feature has no effect. + SetHostnameAsFQDN featuregate.Feature = "SetHostnameAsFQDN" + + // owner: @ksubrmnn + // alpha: v1.14 + // beta: v1.20 + // + // Allows kube-proxy to run in Overlay mode for Windows + WinOverlay featuregate.Feature = "WinOverlay" + + // owner: @ksubrmnn + // alpha: v1.14 + // + // Allows kube-proxy to create DSR loadbalancers for Windows + WinDSR featuregate.Feature = "WinDSR" + + // owner: @RenaudWasTaken @dashpole + // alpha: v1.19 + // beta: v1.20 + // + // Disables Accelerator Metrics Collected by Kubelet + DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics" + + // owner: @arjunrn @mwielgus @josephburnett + // alpha: v1.20 + // + // Add support for the HPA to scale based on metrics from individual containers + // in target pods + HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics" + + // owner: @zshihang + // alpha: v1.13 + // beta: v1.20 + // + // Allows kube-controller-manager to publish kube-root-ca.crt configmap to + // every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume. + RootCAConfigMap featuregate.Feature = "RootCAConfigMap" + + // owner: @andrewsykim + // alpha: v1.20 + // + // Enable Terminating condition in Endpoint Slices. 
+ EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition" + + // owner: @robscott + // alpha: v1.20 + // + // Enable NodeName field on Endpoint Slices. + EndpointSliceNodeName featuregate.Feature = "EndpointSliceNodeName" + + // owner: @derekwaynecarr + // alpha: v1.20 + // + // Enables kubelet support to size memory backed volumes + SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes" + + // owner: @andrewsykim @SergeyKanzhelev + // GA: v1.20 + // + // Ensure kubelet respects exec probe timeouts. Feature gate exists in-case existing workloads + // may depend on old behavior where exec probe timeouts were ignored. + // Lock to default in v1.21 and remove in v1.22. + ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout" + + // owner: @andrewsykim + // alpha: v1.20 + // + // Enable kubelet exec plugins for image pull credentials. + KubeletCredentialProviders featuregate.Feature = "KubeletCredentialProviders" + + // owner: @zshihang + // alpha: v1.20 + // + // Enable kubelet to pass pod's service account token to NodePublishVolume + // call of CSI driver which is mounting volumes for that pod. + CSIServiceAccountToken featuregate.Feature = "CSIServiceAccountToken" + + // owner: @bobbypage + // alpha: v1.20 + // Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown. 
+ GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown" + + // owner: @andrewsykim @uablrek + // alpha: v1.20 + // + // Allows control if NodePorts shall be created for services with "type: LoadBalancer" by defining the spec.AllocateLoadBalancerNodePorts field (bool) + ServiceLBNodePortControl featuregate.Feature = "ServiceLBNodePortControl" + + // owner: @janosi + // alpha: v1.20 + // + // Enables the usage of different protocols in the same Service with type=LoadBalancer + MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService" +) diff --git a/kube/pkg/quota/v1/evaluator/core/persistent_volume_claims.go b/kube/pkg/quota/v1/evaluator/core/persistent_volume_claims.go new file mode 100644 index 000000000..5ca2ab82d --- /dev/null +++ b/kube/pkg/quota/v1/evaluator/core/persistent_volume_claims.go @@ -0,0 +1,188 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package core + +import ( + "fmt" + "strings" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper" + k8sfeatures "kubesphere.io/kubesphere/kube/pkg/features" + quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" +) + +// the name used for object count quota +var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) + +// pvcResources are the set of static resources managed by quota associated with pvcs. +// for each resource in this list, it may be refined dynamically based on storage class. +var pvcResources = []corev1.ResourceName{ + corev1.ResourcePersistentVolumeClaims, + corev1.ResourceRequestsStorage, +} + +// storageClassSuffix is the suffix to the qualified portion of storage class resource name. +// For example, if you want to quota storage by storage class, you would have a declaration +// that follows .storageclass.storage.k8s.io/. +// For example: +// * gold.storageclass.storage.k8s.io/: 500Gi +// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi +const storageClassSuffix string = ".storageclass.storage.k8s.io/" + +/* TODO: prune? +// ResourceByStorageClass returns a quota resource name by storage class. +func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) +} +*/ + +// V1ResourceByStorageClass returns a quota resource name by storage class. 
+func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
+	return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
+}
+
+// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
+func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
+	listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims"))
+	pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace}
+	return pvcEvaluator
+}
+
+// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
+type pvcEvaluator struct {
+	// listFuncByNamespace knows how to list pvc claims
+	listFuncByNamespace generic.ListFuncByNamespace
+}
+
+// Constraints verifies that all required resources are present on the item.
+func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
+	// no-op for persistent volume claims; pvc quota imposes no required-field constraints
+	return nil
+}
+
+// GroupResource that this evaluator tracks
+func (p *pvcEvaluator) GroupResource() schema.GroupResource {
+	return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()
+}
+
+// Handles returns true if the evaluator should handle the specified operation.
+func (p *pvcEvaluator) Handles(a admission.Attributes) bool {
+	op := a.GetOperation()
+	if op == admission.Create {
+		return true
+	}
+	// Updates are only quota-relevant when the ExpandPersistentVolumes feature
+	// gate is on, since that is when a pvc's storage request can grow in place.
+	if op == admission.Update && utilfeature.DefaultFeatureGate.Enabled(k8sfeatures.ExpandPersistentVolumes) {
+		return true
+	}
+	return false
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
+	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
+}
+
+// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
+// Always empty: quota scopes are not applicable to persistent volume claims.
+func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
+	return []corev1.ScopedResourceSelectorRequirement{}, nil
+}
+
+// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
+// Always empty: quota scopes are not applicable to persistent volume claims.
+func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
+	return []corev1.ScopedResourceSelectorRequirement{}, nil
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
+	result := []corev1.ResourceName{}
+	for _, item := range items {
+		// match object count quota fields
+		if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) {
+			result = append(result, item)
+			continue
+		}
+		// match pvc resources
+		if quota.Contains(pvcResources, item) {
+			result = append(result, item)
+			continue
+		}
+		// match pvc resources scoped by storage class (<storage-class-name>.storageclass.storage.k8s.io/<resource>)
+		for _, resource := range pvcResources {
+			byStorageClass := storageClassSuffix + string(resource)
+			if strings.HasSuffix(string(item), byStorageClass) {
+				result = append(result, item)
+				break
+			}
+		}
+	}
+	return result
+}
+
+// Usage knows how to measure usage associated with item.
+func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
+	result := corev1.ResourceList{}
+	pvc, err := toExternalPersistentVolumeClaimOrError(item)
+	if err != nil {
+		return result, err
+	}
+
+	// charge for claim: both the legacy "persistentvolumeclaims" resource and
+	// the object count resource name are incremented by one.
+	result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI))
+	result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
+	storageClassRef := helper.GetPersistentVolumeClaimClass(pvc)
+	if len(storageClassRef) > 0 {
+		storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims))
+		result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI))
+	}
+
+	// charge for storage
+	if request, found := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; found {
+		result[corev1.ResourceRequestsStorage] = request
+		// charge usage to the storage class (if present)
+		if len(storageClassRef) > 0 {
+			storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage))
+			result[storageClassStorage] = request
+		}
+	}
+	return result, nil
+}
+
+// UsageStats calculates aggregate
usage for the object. +func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) { + return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage) +} + +// ensure we implement required interface +var _ quota.Evaluator = &pvcEvaluator{} + +func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) { + pvc := &corev1.PersistentVolumeClaim{} + switch t := obj.(type) { + case *corev1.PersistentVolumeClaim: + pvc = t + default: + return nil, fmt.Errorf("expect *v1.PersistentVolumeClaim, got %v", t) + } + return pvc, nil +} diff --git a/kube/pkg/quota/v1/evaluator/core/pods.go b/kube/pkg/quota/v1/evaluator/core/pods.go new file mode 100644 index 000000000..2be574741 --- /dev/null +++ b/kube/pkg/quota/v1/evaluator/core/pods.go @@ -0,0 +1,397 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package core
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/admission"
+	"kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper"
+	"kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper/qos"
+	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
+	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
+)
+
+// the name used for object count quota
+var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("pods").GroupResource())
+
+// podResources are the set of resources managed by quota associated with pods.
+var podResources = []corev1.ResourceName{
+	podObjectCountName,
+	corev1.ResourceCPU,
+	corev1.ResourceMemory,
+	corev1.ResourceEphemeralStorage,
+	corev1.ResourceRequestsCPU,
+	corev1.ResourceRequestsMemory,
+	corev1.ResourceRequestsEphemeralStorage,
+	corev1.ResourceLimitsCPU,
+	corev1.ResourceLimitsMemory,
+	corev1.ResourceLimitsEphemeralStorage,
+	corev1.ResourcePods,
+}
+
+// podResourcePrefixes are the set of prefixes for resources (Hugepages, and other
+// potential extended resources with specific prefix) managed by quota associated with pods.
+var podResourcePrefixes = []string{
+	corev1.ResourceHugePagesPrefix,
+	corev1.ResourceRequestsHugePagesPrefix,
+}
+
+// requestedResourcePrefixes are the set of prefixes for resources
+// that might be declared in pod's Resources.Requests/Limits
+var requestedResourcePrefixes = []string{
+	corev1.ResourceHugePagesPrefix,
+}
+
+// maskResourceWithPrefix mask resource with certain prefix
+// e.g.
hugepages-XXX -> requests.hugepages-XXX +func maskResourceWithPrefix(resource corev1.ResourceName, prefix string) corev1.ResourceName { + return corev1.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) +} + +// isExtendedResourceNameForQuota returns true if the extended resource name +// has the quota related resource prefix. +func isExtendedResourceNameForQuota(name corev1.ResourceName) bool { + // As overcommit is not supported by extended resources for now, + // only quota objects in format of "requests.resourceName" is allowed. + return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), corev1.DefaultResourceRequestsPrefix) +} + +// NOTE: it was a mistake, but if a quota tracks cpu or memory related resources, +// the incoming pod is required to have those values set. we should not repeat +// this mistake for other future resources (gpus, ephemeral-storage,etc). +// do not add more resources to this list! +var validationSet = sets.NewString( + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceRequestsCPU), + string(corev1.ResourceRequestsMemory), + string(corev1.ResourceLimitsCPU), + string(corev1.ResourceLimitsMemory), +) + +// NewPodEvaluator returns an evaluator that can evaluate pods +func NewPodEvaluator(f quota.ListerForResourceFunc, clock clock.Clock) quota.Evaluator { + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("pods")) + podEvaluator := &podEvaluator{listFuncByNamespace: listFuncByNamespace, clock: clock} + return podEvaluator +} + +// podEvaluator knows how to measure usage of pods. +type podEvaluator struct { + // knows how to list pods + listFuncByNamespace generic.ListFuncByNamespace + // used to track time + clock clock.Clock +} + +// Constraints verifies that all required resources are present on the pod +// In addition, it validates that the resources are valid (i.e. 
requests < limits) +func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { + pod, err := toExternalPodOrError(item) + if err != nil { + return err + } + + // BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container + // must make an explicit request for the resource. this was a mistake. it coupled + // validation with resource counting, but we did this before QoS was even defined. + // let's not make that mistake again with other resources now that QoS is defined. + requiredSet := quota.ToSet(required).Intersection(validationSet) + missingSet := sets.NewString() + for i := range pod.Spec.Containers { + enforcePodContainerConstraints(&pod.Spec.Containers[i], requiredSet, missingSet) + } + for i := range pod.Spec.InitContainers { + enforcePodContainerConstraints(&pod.Spec.InitContainers[i], requiredSet, missingSet) + } + if len(missingSet) == 0 { + return nil + } + return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ",")) +} + +// GroupResource that this evaluator tracks +func (p *podEvaluator) GroupResource() schema.GroupResource { + return corev1.SchemeGroupVersion.WithResource("pods").GroupResource() +} + +// Handles returns true if the evaluator should handle the specified attributes. +func (p *podEvaluator) Handles(a admission.Attributes) bool { + op := a.GetOperation() + if op == admission.Create { + return true + } + return false +} + +// Matches returns true if the evaluator matches the specified quota with the provided input item +func (p *podEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { + return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc) +} + +// MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
+func (p *podEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { + result := quota.Intersection(input, podResources) + for _, resource := range input { + // for resources with certain prefix, e.g. hugepages + if quota.ContainsPrefix(podResourcePrefixes, resource) { + result = append(result, resource) + } + // for extended resources + if isExtendedResourceNameForQuota(resource) { + result = append(result, resource) + } + } + + return result +} + +// MatchingScopes takes the input specified list of scopes and pod object. Returns the set of scope selectors pod matches. +func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + matchedScopes := []corev1.ScopedResourceSelectorRequirement{} + for _, selector := range scopeSelectors { + match, err := podMatchesScopeFunc(selector, item) + if err != nil { + return []corev1.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err) + } + if match { + matchedScopes = append(matchedScopes, selector) + } + } + return matchedScopes, nil +} + +// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. 
+// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope +func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{} + for _, selector := range limitedScopes { + isCovered := false + for _, matchedScopeSelector := range matchedQuotaScopes { + if matchedScopeSelector.ScopeName == selector.ScopeName { + isCovered = true + break + } + } + + if !isCovered { + uncoveredScopes = append(uncoveredScopes, selector) + } + } + return uncoveredScopes, nil +} + +// Usage knows how to measure usage associated with pods +func (p *podEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + // delegate to normal usage + return PodUsageFunc(item, p.clock) +} + +// UsageStats calculates aggregate usage for the object. +func (p *podEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) { + return generic.CalculateUsageStats(options, p.listFuncByNamespace, podMatchesScopeFunc, p.Usage) +} + +// verifies we implement the required interface. +var _ quota.Evaluator = &podEvaluator{} + +// enforcePodContainerConstraints checks for required resources that are not set on this container and +// adds them to missingSet. +func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.String) { + requests := container.Resources.Requests + limits := container.Resources.Limits + containerUsage := podComputeUsageHelper(requests, limits) + containerSet := quota.ToSet(quota.ResourceNames(containerUsage)) + if !containerSet.Equal(requiredSet) { + difference := requiredSet.Difference(containerSet) + missingSet.Insert(difference.List()...) 
+ } +} + +// podComputeUsageHelper can summarize the pod compute quota usage based on requests and limits +func podComputeUsageHelper(requests corev1.ResourceList, limits corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + result[corev1.ResourcePods] = resource.MustParse("1") + if request, found := requests[corev1.ResourceCPU]; found { + result[corev1.ResourceCPU] = request + result[corev1.ResourceRequestsCPU] = request + } + if limit, found := limits[corev1.ResourceCPU]; found { + result[corev1.ResourceLimitsCPU] = limit + } + if request, found := requests[corev1.ResourceMemory]; found { + result[corev1.ResourceMemory] = request + result[corev1.ResourceRequestsMemory] = request + } + if limit, found := limits[corev1.ResourceMemory]; found { + result[corev1.ResourceLimitsMemory] = limit + } + if request, found := requests[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceEphemeralStorage] = request + result[corev1.ResourceRequestsEphemeralStorage] = request + } + if limit, found := limits[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceLimitsEphemeralStorage] = limit + } + for resource, request := range requests { + // for resources with certain prefix, e.g. hugepages + if quota.ContainsPrefix(requestedResourcePrefixes, resource) { + result[resource] = request + result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request + } + // for extended resources + if helper.IsExtendedResourceName(resource) { + // only quota objects in format of "requests.resourceName" is allowed for extended resource. 
+ result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request + } + } + + return result +} + +func toExternalPodOrError(obj runtime.Object) (*corev1.Pod, error) { + pod := &corev1.Pod{} + switch t := obj.(type) { + case *corev1.Pod: + pod = t + default: + return nil, fmt.Errorf("expect *v1.Pod, got %v", t) + } + return pod, nil +} + +// podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope +func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { + pod, err := toExternalPodOrError(object) + if err != nil { + return false, err + } + switch selector.ScopeName { + case corev1.ResourceQuotaScopeTerminating: + return isTerminating(pod), nil + case corev1.ResourceQuotaScopeNotTerminating: + return !isTerminating(pod), nil + case corev1.ResourceQuotaScopeBestEffort: + return isBestEffort(pod), nil + case corev1.ResourceQuotaScopeNotBestEffort: + return !isBestEffort(pod), nil + case corev1.ResourceQuotaScopePriorityClass: + return podMatchesSelector(pod, selector) + } + return false, nil +} + +// PodUsageFunc returns the quota usage for a pod. +// A pod is charged for quota if the following are not true. +// - pod has a terminal phase (failed or succeeded) +// - pod has been marked for deletion and grace period has expired +func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, error) { + pod, err := toExternalPodOrError(obj) + if err != nil { + return corev1.ResourceList{}, err + } + + // always quota the object count (even if the pod is end of life) + // object count quotas track all objects that are in storage. + // where "pods" tracks all pods that have not reached a terminal state, + // count/pods tracks all pods independent of state. 
+ result := corev1.ResourceList{ + podObjectCountName: *(resource.NewQuantity(1, resource.DecimalSI)), + } + + // by convention, we do not quota compute resources that have reached end-of life + // note: the "pods" resource is considered a compute resource since it is tied to life-cycle. + if !QuotaV1Pod(pod, clock) { + return result, nil + } + + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} + // TODO: ideally, we have pod level requests and limits in the future. + for i := range pod.Spec.Containers { + requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests) + limits = quota.Add(limits, pod.Spec.Containers[i].Resources.Limits) + } + // InitContainers are run sequentially before other containers start, so the highest + // init container resource is compared against the sum of app containers to determine + // the effective usage for both requests and limits. + for i := range pod.Spec.InitContainers { + requests = quota.Max(requests, pod.Spec.InitContainers[i].Resources.Requests) + limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits) + } + + result = quota.Add(result, podComputeUsageHelper(requests, limits)) + return result, nil +} + +func isBestEffort(pod *corev1.Pod) bool { + return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort +} + +func isTerminating(pod *corev1.Pod) bool { + if pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) { + return true + } + return false +} + +func podMatchesSelector(pod *corev1.Pod, selector corev1.ScopedResourceSelectorRequirement) (bool, error) { + labelSelector, err := helper.ScopedResourceSelectorRequirementsAsSelector(selector) + if err != nil { + return false, fmt.Errorf("failed to parse and convert selector: %v", err) + } + var m map[string]string + if len(pod.Spec.PriorityClassName) != 0 { + m = map[string]string{string(corev1.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName} + } + if labelSelector.Matches(labels.Set(m)) { + 
return true, nil + } + return false, nil +} + +// QuotaV1Pod returns true if the pod is eligible to track against a quota +// if it's not in a terminal state according to its phase. +func QuotaV1Pod(pod *corev1.Pod, clock clock.Clock) bool { + // if pod is terminal, ignore it for quota + if corev1.PodFailed == pod.Status.Phase || corev1.PodSucceeded == pod.Status.Phase { + return false + } + // if pods are stuck terminating (for example, a node is lost), we do not want + // to charge the user for that pod in quota because it could prevent them from + // scaling up new pods to service their application. + if pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil { + now := clock.Now() + deletionTime := pod.DeletionTimestamp.Time + gracePeriod := time.Duration(*pod.DeletionGracePeriodSeconds) * time.Second + if now.After(deletionTime.Add(gracePeriod)) { + return false + } + } + return true +} diff --git a/kube/pkg/quota/v1/evaluator/core/registry.go b/kube/pkg/quota/v1/evaluator/core/registry.go new file mode 100644 index 000000000..f18cff555 --- /dev/null +++ b/kube/pkg/quota/v1/evaluator/core/registry.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package core + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/clock" + quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" +) + +// legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias +var legacyObjectCountAliases = map[schema.GroupVersionResource]corev1.ResourceName{ + corev1.SchemeGroupVersion.WithResource("configmaps"): corev1.ResourceConfigMaps, + corev1.SchemeGroupVersion.WithResource("resourcequotas"): corev1.ResourceQuotas, + corev1.SchemeGroupVersion.WithResource("replicationcontrollers"): corev1.ResourceReplicationControllers, + corev1.SchemeGroupVersion.WithResource("secrets"): corev1.ResourceSecrets, +} + +// NewEvaluators returns the list of static evaluators that manage more than counts +func NewEvaluators(f quota.ListerForResourceFunc) []quota.Evaluator { + // these evaluators have special logic + result := []quota.Evaluator{ + NewPodEvaluator(f, clock.RealClock{}), + NewServiceEvaluator(f), + NewPersistentVolumeClaimEvaluator(f), + } + // these evaluators require an alias for backwards compatibility + for gvr, alias := range legacyObjectCountAliases { + result = append(result, + generic.NewObjectCountEvaluator(gvr.GroupResource(), generic.ListResourceUsingListerFunc(f, gvr), alias)) + } + return result +} diff --git a/kube/pkg/quota/v1/evaluator/core/services.go b/kube/pkg/quota/v1/evaluator/core/services.go new file mode 100644 index 000000000..82517b717 --- /dev/null +++ b/kube/pkg/quota/v1/evaluator/core/services.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" +) + +// the name used for object count quota +var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("services").GroupResource()) + +// serviceResources are the set of resources managed by quota associated with services. +var serviceResources = []corev1.ResourceName{ + serviceObjectCountName, + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, +} + +// NewServiceEvaluator returns an evaluator that can evaluate services. +func NewServiceEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("services")) + serviceEvaluator := &serviceEvaluator{listFuncByNamespace: listFuncByNamespace} + return serviceEvaluator +} + +// serviceEvaluator knows how to measure usage for services. 
+type serviceEvaluator struct {
+	// knows how to list items by namespace
+	listFuncByNamespace generic.ListFuncByNamespace
+}
+
+// Constraints verifies that all required resources are present on the item
+func (p *serviceEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
+	// this is a no-op for services
+	return nil
+}
+
+// GroupResource that this evaluator tracks
+func (p *serviceEvaluator) GroupResource() schema.GroupResource {
+	return corev1.SchemeGroupVersion.WithResource("services").GroupResource()
+}
+
+// Handles returns true if the evaluator should handle the specified operation.
+func (p *serviceEvaluator) Handles(a admission.Attributes) bool {
+	operation := a.GetOperation()
+	// We handle create and update because a service type can change.
+	return admission.Create == operation || admission.Update == operation
+}
+
+// Matches returns true if the evaluator matches the specified quota with the provided input item
+func (p *serviceEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
+	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
+}
+
+// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
+func (p *serviceEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName {
+	return quota.Intersection(input, serviceResources)
+}
+
+// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
+func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
+	return []corev1.ScopedResourceSelectorRequirement{}, nil
+}
+
+// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
+func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
+	return []corev1.ScopedResourceSelectorRequirement{}, nil
+}
+
+// convert the input object to an external service object or error.
+func toExternalServiceOrError(obj runtime.Object) (*corev1.Service, error) {
+	svc := &corev1.Service{}
+	switch t := obj.(type) {
+	case *corev1.Service:
+		svc = t
+	default:
+		return nil, fmt.Errorf("expect *v1.Service, got %v", t)
+	}
+	return svc, nil
+}
+
+// Usage knows how to measure usage associated with services
+func (p *serviceEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
+	result := corev1.ResourceList{}
+	svc, err := toExternalServiceOrError(item)
+	if err != nil {
+		return result, err
+	}
+	ports := len(svc.Spec.Ports)
+	// default service usage
+	result[serviceObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
+	result[corev1.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI))
+	result[corev1.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI}
+	result[corev1.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI}
+	switch svc.Spec.Type {
+	case corev1.ServiceTypeNodePort:
+		// node port services need to count node ports
+		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
+		result[corev1.ResourceServicesNodePorts] = *value
+	case corev1.ServiceTypeLoadBalancer:
+		// load balancer services need to count node ports and load balancers
+		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
+		result[corev1.ResourceServicesNodePorts] = *value
+		result[corev1.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI))
+	}
+	return result, nil
+}
+
+// UsageStats 
calculates aggregate usage for the object.
+func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
+	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
+}
+
+var _ quota.Evaluator = &serviceEvaluator{}
+
+// GetQuotaServiceType returns the ServiceType if the service type is eligible to track against a quota, or returns "" otherwise.
+func GetQuotaServiceType(service *corev1.Service) corev1.ServiceType {
+	switch service.Spec.Type {
+	case corev1.ServiceTypeNodePort:
+		return corev1.ServiceTypeNodePort
+	case corev1.ServiceTypeLoadBalancer:
+		return corev1.ServiceTypeLoadBalancer
+	}
+	return corev1.ServiceType("")
+}
diff --git a/kube/pkg/quota/v1/generic/configuration.go b/kube/pkg/quota/v1/generic/configuration.go
new file mode 100644
index 000000000..fd9355745
--- /dev/null
+++ b/kube/pkg/quota/v1/generic/configuration.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. 
+*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/kube/pkg/quota/v1" +) + +// implements a basic configuration +type simpleConfiguration struct { + evaluators []quota.Evaluator + ignoredResources map[schema.GroupResource]struct{} +} + +// NewConfiguration creates a quota configuration +func NewConfiguration(evaluators []quota.Evaluator, ignoredResources map[schema.GroupResource]struct{}) quota.Configuration { + return &simpleConfiguration{ + evaluators: evaluators, + ignoredResources: ignoredResources, + } +} + +func (c *simpleConfiguration) IgnoredResources() map[schema.GroupResource]struct{} { + return c.ignoredResources +} + +func (c *simpleConfiguration) Evaluators() []quota.Evaluator { + return c.evaluators +} diff --git a/kube/pkg/quota/v1/generic/evaluator.go b/kube/pkg/quota/v1/generic/evaluator.go new file mode 100644 index 000000000..cc3f000db --- /dev/null +++ b/kube/pkg/quota/v1/generic/evaluator.go @@ -0,0 +1,319 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "fmt" + "sync/atomic" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" +) + +// InformerForResourceFunc knows how to provision an informer +type InformerForResourceFunc func(schema.GroupVersionResource) (informers.GenericInformer, error) + +// ListerFuncForResourceFunc knows how to provision a lister from an informer func. +// The lister returns errors until the informer has synced. +func ListerFuncForResourceFunc(f InformerForResourceFunc) quota.ListerForResourceFunc { + return func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { + informer, err := f(gvr) + if err != nil { + return nil, err + } + return &protectedLister{ + hasSynced: cachedHasSynced(informer.Informer().HasSynced), + notReadyErr: fmt.Errorf("%v not yet synced", gvr), + delegate: informer.Lister(), + }, nil + } +} + +// cachedHasSynced returns a function that calls hasSynced() until it returns true once, then returns true +func cachedHasSynced(hasSynced func() bool) func() bool { + cache := &atomic.Value{} + cache.Store(false) + return func() bool { + if cache.Load().(bool) { + // short-circuit if already synced + return true + } + if hasSynced() { + // remember we synced + cache.Store(true) + return true + } + return false + } +} + +// protectedLister returns notReadyError if hasSynced returns false, otherwise delegates to delegate +type protectedLister struct { + hasSynced func() bool + notReadyErr error + delegate cache.GenericLister +} + +func (p *protectedLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + if !p.hasSynced() { + return nil, p.notReadyErr + } + return p.delegate.List(selector) +} +func (p *protectedLister) Get(name 
string) (runtime.Object, error) { + if !p.hasSynced() { + return nil, p.notReadyErr + } + return p.delegate.Get(name) +} +func (p *protectedLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &protectedNamespaceLister{p.hasSynced, p.notReadyErr, p.delegate.ByNamespace(namespace)} +} + +// protectedNamespaceLister returns notReadyError if hasSynced returns false, otherwise delegates to delegate +type protectedNamespaceLister struct { + hasSynced func() bool + notReadyErr error + delegate cache.GenericNamespaceLister +} + +func (p *protectedNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + if !p.hasSynced() { + return nil, p.notReadyErr + } + return p.delegate.List(selector) +} +func (p *protectedNamespaceLister) Get(name string) (runtime.Object, error) { + if !p.hasSynced() { + return nil, p.notReadyErr + } + return p.delegate.Get(name) +} + +// ListResourceUsingListerFunc returns a listing function based on the shared informer factory for the specified resource. +func ListResourceUsingListerFunc(l quota.ListerForResourceFunc, resource schema.GroupVersionResource) ListFuncByNamespace { + return func(namespace string) ([]runtime.Object, error) { + lister, err := l(resource) + if err != nil { + return nil, err + } + return lister.ByNamespace(namespace).List(labels.Everything()) + } +} + +// ObjectCountQuotaResourceNameFor returns the object count quota name for specified groupResource +func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) corev1.ResourceName { + if len(groupResource.Group) == 0 { + return corev1.ResourceName("count/" + groupResource.Resource) + } + return corev1.ResourceName("count/" + groupResource.Resource + "." 
+ groupResource.Group) +} + +// ListFuncByNamespace knows how to list resources in a namespace +type ListFuncByNamespace func(namespace string) ([]runtime.Object, error) + +// MatchesScopeFunc knows how to evaluate if an object matches a scope +type MatchesScopeFunc func(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) + +// UsageFunc knows how to measure usage associated with an object +type UsageFunc func(object runtime.Object) (corev1.ResourceList, error) + +// MatchingResourceNamesFunc is a function that returns the list of resources matched +type MatchingResourceNamesFunc func(input []corev1.ResourceName) []corev1.ResourceName + +// MatchesNoScopeFunc returns false on all match checks +func MatchesNoScopeFunc(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { + return false, nil +} + +// Matches returns true if the quota matches the specified item. +func Matches( + resourceQuota *corev1.ResourceQuota, item runtime.Object, + matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) { + if resourceQuota == nil { + return false, fmt.Errorf("expected non-nil quota") + } + // verify the quota matches on at least one resource + matchResource := len(matchFunc(quota.ResourceNames(resourceQuota.Status.Hard))) > 0 + // by default, no scopes matches all + matchScope := true + for _, scope := range getScopeSelectorsFromQuota(resourceQuota) { + innerMatch, err := scopeFunc(scope, item) + if err != nil { + return false, err + } + matchScope = matchScope && innerMatch + } + return matchResource && matchScope, nil +} + +func getScopeSelectorsFromQuota(quota *corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := []corev1.ScopedResourceSelectorRequirement{} + for _, scope := range quota.Spec.Scopes { + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ + ScopeName: scope, + Operator: corev1.ScopeSelectorOpExists}) + } + if 
quota.Spec.ScopeSelector != nil { + selectors = append(selectors, quota.Spec.ScopeSelector.MatchExpressions...) + } + return selectors +} + +// CalculateUsageStats is a utility function that knows how to calculate aggregate usage. +func CalculateUsageStats(options quota.UsageStatsOptions, + listFunc ListFuncByNamespace, + scopeFunc MatchesScopeFunc, + usageFunc UsageFunc) (quota.UsageStats, error) { + // default each tracked resource to zero + result := quota.UsageStats{Used: corev1.ResourceList{}} + for _, resourceName := range options.Resources { + result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI} + } + items, err := listFunc(options.Namespace) + if err != nil { + return result, fmt.Errorf("failed to list content: %v", err) + } + for _, item := range items { + // need to verify that the item matches the set of scopes + matchesScopes := true + for _, scope := range options.Scopes { + innerMatch, err := scopeFunc(corev1.ScopedResourceSelectorRequirement{ScopeName: scope}, item) + if err != nil { + return result, nil + } + if !innerMatch { + matchesScopes = false + } + } + if options.ScopeSelector != nil { + for _, selector := range options.ScopeSelector.MatchExpressions { + innerMatch, err := scopeFunc(selector, item) + if err != nil { + return result, nil + } + matchesScopes = matchesScopes && innerMatch + } + } + // only count usage if there was a match + if matchesScopes { + usage, err := usageFunc(item) + if err != nil { + return result, err + } + result.Used = quota.Add(result.Used, usage) + } + } + return result, nil +} + +// objectCountEvaluator provides an implementation for quota.Evaluator +// that associates usage of the specified resource based on the number of items +// returned by the specified listing function. +type objectCountEvaluator struct { + // GroupResource that this evaluator tracks. 
+ // It is used to construct a generic object count quota name + groupResource schema.GroupResource + // A function that knows how to list resources by namespace. + // TODO move to dynamic client in future + listFuncByNamespace ListFuncByNamespace + // Names associated with this resource in the quota for generic counting. + resourceNames []corev1.ResourceName +} + +// Constraints returns an error if the configured resource name is not in the required set. +func (o *objectCountEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { + // no-op for object counting + return nil +} + +// Handles returns true if the object count evaluator needs to track this attributes. +func (o *objectCountEvaluator) Handles(a admission.Attributes) bool { + operation := a.GetOperation() + return operation == admission.Create +} + +// Matches returns true if the evaluator matches the specified quota with the provided input item +func (o *objectCountEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { + return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc) +} + +// MatchingResources takes the input specified list of resources and returns the set of resources it matches. +func (o *objectCountEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { + return quota.Intersection(input, o.resourceNames) +} + +// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. +func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. 
+// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope +func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +// Usage returns the resource usage for the specified object +func (o *objectCountEvaluator) Usage(object runtime.Object) (corev1.ResourceList, error) { + quantity := resource.NewQuantity(1, resource.DecimalSI) + resourceList := corev1.ResourceList{} + for _, resourceName := range o.resourceNames { + resourceList[resourceName] = *quantity + } + return resourceList, nil +} + +// GroupResource tracked by this evaluator +func (o *objectCountEvaluator) GroupResource() schema.GroupResource { + return o.groupResource +} + +// UsageStats calculates aggregate usage for the object. +func (o *objectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) { + return CalculateUsageStats(options, o.listFuncByNamespace, MatchesNoScopeFunc, o.Usage) +} + +// Verify implementation of interface at compile time. +var _ quota.Evaluator = &objectCountEvaluator{} + +// NewObjectCountEvaluator returns an evaluator that can perform generic +// object quota counting. It allows an optional alias for backwards compatibility +// purposes for the legacy object counting names in quota. Unless its supporting +// backward compatibility, alias should not be used. 
+func NewObjectCountEvaluator( + groupResource schema.GroupResource, listFuncByNamespace ListFuncByNamespace, + alias corev1.ResourceName) quota.Evaluator { + + resourceNames := []corev1.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} + if len(alias) > 0 { + resourceNames = append(resourceNames, alias) + } + + return &objectCountEvaluator{ + groupResource: groupResource, + listFuncByNamespace: listFuncByNamespace, + resourceNames: resourceNames, + } +} diff --git a/kube/pkg/quota/v1/generic/registry.go b/kube/pkg/quota/v1/generic/registry.go new file mode 100644 index 000000000..d31ac699e --- /dev/null +++ b/kube/pkg/quota/v1/generic/registry.go @@ -0,0 +1,81 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" +) + +// implements a basic registry +type simpleRegistry struct { + lock sync.RWMutex + // evaluators tracked by the registry + evaluators map[schema.GroupResource]quota.Evaluator +} + +// NewRegistry creates a simple registry with initial list of evaluators +func NewRegistry(evaluators []quota.Evaluator) quota.Registry { + return &simpleRegistry{ + evaluators: evaluatorsByGroupResource(evaluators), + } +} + +func (r *simpleRegistry) Add(e quota.Evaluator) { + r.lock.Lock() + defer r.lock.Unlock() + r.evaluators[e.GroupResource()] = e +} + +func (r *simpleRegistry) Remove(e quota.Evaluator) { + r.lock.Lock() + defer r.lock.Unlock() + delete(r.evaluators, e.GroupResource()) +} + +func (r *simpleRegistry) Get(gr schema.GroupResource) quota.Evaluator { + r.lock.RLock() + defer r.lock.RUnlock() + return r.evaluators[gr] +} + +func (r *simpleRegistry) List() []quota.Evaluator { + r.lock.RLock() + defer r.lock.RUnlock() + + return evaluatorsList(r.evaluators) +} + +// evaluatorsByGroupResource converts a list of evaluators to a map by group resource. +func evaluatorsByGroupResource(items []quota.Evaluator) map[schema.GroupResource]quota.Evaluator { + result := map[schema.GroupResource]quota.Evaluator{} + for _, item := range items { + result[item.GroupResource()] = item + } + return result +} + +// evaluatorsList converts a map of evaluators to list +func evaluatorsList(input map[schema.GroupResource]quota.Evaluator) []quota.Evaluator { + var result []quota.Evaluator + for _, item := range input { + result = append(result, item) + } + return result +} diff --git a/kube/pkg/quota/v1/install/registry.go b/kube/pkg/quota/v1/install/registry.go new file mode 100644 index 000000000..4e8179790 --- /dev/null +++ b/kube/pkg/quota/v1/install/registry.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + quota "kubesphere.io/kubesphere/kube/pkg/quota/v1" + core "kubesphere.io/kubesphere/kube/pkg/quota/v1/evaluator/core" + generic "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" +) + +// NewQuotaConfigurationForAdmission returns a quota configuration for admission control. +func NewQuotaConfigurationForAdmission() quota.Configuration { + evaluators := core.NewEvaluators(nil) + return generic.NewConfiguration(evaluators, DefaultIgnoredResources()) +} + +// NewQuotaConfigurationForControllers returns a quota configuration for controllers. +func NewQuotaConfigurationForControllers(f quota.ListerForResourceFunc) quota.Configuration { + evaluators := core.NewEvaluators(f) + return generic.NewConfiguration(evaluators, DefaultIgnoredResources()) +} + +// ignoredResources are ignored by quota by default +var ignoredResources = map[schema.GroupResource]struct{}{ + {Group: "", Resource: "events"}: {}, +} + +// DefaultIgnoredResources returns the default set of resources that quota system +// should ignore. This is exposed so downstream integrators can have access to them. 
+func DefaultIgnoredResources() map[schema.GroupResource]struct{} { + return ignoredResources +} diff --git a/kube/pkg/quota/v1/interfaces.go b/kube/pkg/quota/v1/interfaces.go new file mode 100644 index 000000000..d71b66418 --- /dev/null +++ b/kube/pkg/quota/v1/interfaces.go @@ -0,0 +1,88 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package quota + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/tools/cache" +) + +// UsageStatsOptions is an options structs that describes how stats should be calculated +type UsageStatsOptions struct { + // Namespace where stats should be calculate + Namespace string + // Scopes that must match counted objects + Scopes []corev1.ResourceQuotaScope + // Resources are the set of resources to include in the measurement + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector +} + +// UsageStats is result of measuring observed resource use in the system +type UsageStats struct { + // Used maps resource to quantity used + Used corev1.ResourceList +} + +// Evaluator knows how to evaluate quota usage for a particular group resource +type Evaluator interface { + // Constraints ensures that each required resource is present on item + Constraints(required []corev1.ResourceName, item runtime.Object) error + // GroupResource returns the groupResource that this object knows 
how to evaluate + GroupResource() schema.GroupResource + // Handles determines if quota could be impacted by the specified attribute. + // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. + Handles(operation admission.Attributes) bool + // Matches returns true if the specified quota matches the input item + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) + // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) + // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName + // Usage returns the resource usage for the specified object + Usage(item runtime.Object) (corev1.ResourceList, error) + // UsageStats calculates latest observed usage stats for all objects + UsageStats(options UsageStatsOptions) (UsageStats, error) +} + +// Configuration defines how the quota system is configured. +type Configuration interface { + // IgnoredResources are ignored by quota. + IgnoredResources() map[schema.GroupResource]struct{} + // Evaluators for quota evaluation. 
+ Evaluators() []Evaluator +} + +// Registry maintains a list of evaluators +type Registry interface { + // Add to registry + Add(e Evaluator) + // Remove from registry + Remove(e Evaluator) + // Get by group resource + Get(gr schema.GroupResource) Evaluator + // List from registry + List() []Evaluator +} + +// ListerForResourceFunc knows how to get a lister for a specific resource +type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error) diff --git a/kube/pkg/quota/v1/resources.go b/kube/pkg/quota/v1/resources.go new file mode 100644 index 000000000..9b217489d --- /dev/null +++ b/kube/pkg/quota/v1/resources.go @@ -0,0 +1,293 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package quota + +import ( + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Equals returns true if the two lists are equivalent +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { + if len(a) != len(b) { + return false + } + + for key, value1 := range a { + value2, found := b[key] + if !found { + return false + } + if value1.Cmp(value2) != 0 { + return false + } + } + + return true +} + +// LessThanOrEqual returns true if a < b for each key in b +// If false, it returns the keys in a that exceeded b +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { + result := true + resourceNames := []corev1.ResourceName{} + for key, value := range b { + if other, found := a[key]; found { + if other.Cmp(value) > 0 { + result = false + resourceNames = append(resourceNames, key) + } + } + } + return result, resourceNames +} + +// Max returns the result of Max(a, b) for each named resource +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + if other, found := b[key]; found { + if value.Cmp(other) <= 0 { + result[key] = other.DeepCopy() + continue + } + } + result[key] = value.DeepCopy() + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// Add returns the result of a + b for each named resource +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + for key, value := range a { + quantity := value.DeepCopy() + if other, found := b[key]; found { + quantity.Add(other) + } + result[key] = quantity + } + for key, value := range b { + if _, found := result[key]; !found { + result[key] = value.DeepCopy() + } + } + return result +} + +// 
// SubtractWithNonNegativeResult - subtracts and returns result of a - b but
// makes sure we don't return negative values to prevent negative resource usage.
// Every key from either list appears in the result, clamped at zero.
func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
	zero := resource.MustParse("0")

	result := corev1.ResourceList{}
	for key, value := range a {
		quantity := value.DeepCopy()
		if other, found := b[key]; found {
			quantity.Sub(other)
		}
		if quantity.Cmp(zero) > 0 {
			result[key] = quantity
		} else {
			result[key] = zero
		}
	}

	// keys only present in b would be negative after subtraction, so clamp them to zero
	for key := range b {
		if _, found := result[key]; !found {
			result[key] = zero
		}
	}
	return result
}

// Subtract returns the result of a - b for each named resource.
// Keys only present in b yield negated values; results may be negative.
func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
	result := corev1.ResourceList{}
	for key, value := range a {
		quantity := value.DeepCopy()
		if other, found := b[key]; found {
			quantity.Sub(other)
		}
		result[key] = quantity
	}
	for key, value := range b {
		if _, found := result[key]; !found {
			quantity := value.DeepCopy()
			quantity.Neg()
			result[key] = quantity
		}
	}
	return result
}

// Mask returns a new resource list that only has the values with the specified names
func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList {
	nameSet := ToSet(names)
	result := corev1.ResourceList{}
	for key, value := range resources {
		if nameSet.Has(string(key)) {
			result[key] = value.DeepCopy()
		}
	}
	return result
}

// ResourceNames returns a list of all resource names in the ResourceList
// (in map iteration order, i.e. unsorted).
func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName {
	result := []corev1.ResourceName{}
	for resourceName := range resources {
		result = append(result, resourceName)
	}
	return result
}

// Contains returns true if the specified item is in the list of items
func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool {
	for _, i := range items {
		if i == item {
			return true
		}
	}
	return false
}

// ContainsPrefix returns true if the specified item has a prefix contained in the given prefix set
func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool {
	for _, prefix := range prefixSet {
		if strings.HasPrefix(string(item), prefix) {
			return true
		}
	}
	return false
}

// Intersection returns the intersection of both lists of resources, deduped and sorted
func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName {
	result := make([]corev1.ResourceName, 0, len(a))
	for _, item := range a {
		// already collected; dedupe
		if Contains(result, item) {
			continue
		}
		if !Contains(b, item) {
			continue
		}
		result = append(result, item)
	}
	sort.Slice(result, func(i, j int) bool { return result[i] < result[j] })
	return result
}

// Difference returns the list of resources resulting from a-b, deduped and sorted
func Difference(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName {
	result := make([]corev1.ResourceName, 0, len(a))
	for _, item := range a {
		if Contains(b, item) || Contains(result, item) {
			continue
		}
		result = append(result, item)
	}
	sort.Slice(result, func(i, j int) bool { return result[i] < result[j] })
	return result
}

// IsZero returns true if each key maps to the quantity value 0
func IsZero(a corev1.ResourceList) bool {
	zero := resource.MustParse("0")
	for _, v := range a {
		if v.Cmp(zero) != 0 {
			return false
		}
	}
	return true
}

// IsNegative returns the set of resource names that have a negative value.
func IsNegative(a corev1.ResourceList) []corev1.ResourceName {
	results := []corev1.ResourceName{}
	zero := resource.MustParse("0")
	for k, v := range a {
		if v.Cmp(zero) < 0 {
			results = append(results, k)
		}
	}
	return results
}

// ToSet takes a list of resource names and converts to a string set
func ToSet(resourceNames []corev1.ResourceName) sets.String {
	result := sets.NewString()
	for _, resourceName := range resourceNames {
		result.Insert(string(resourceName))
	}
	return result
}

// CalculateUsage calculates and returns the requested ResourceList usage.
// If an error is returned, usage only contains the resources which encountered no calculation errors.
func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) {
	// find the intersection between the hard resources on the quota
	// and the resources this controller can track to know what we can
	// look to measure updated usage stats for
	hardResources := ResourceNames(hardLimits)
	potentialResources := []corev1.ResourceName{}
	evaluators := registry.List()
	for _, evaluator := range evaluators {
		potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...)
	}
	// NOTE: the intersection just removes duplicates since the evaluator match intersects with hard
	matchedResources := Intersection(hardResources, potentialResources)

	errors := []error{}

	// sum the observed usage from each evaluator
	newUsage := corev1.ResourceList{}
	for _, evaluator := range evaluators {
		// only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything
		intersection := evaluator.MatchingResources(matchedResources)
		if len(intersection) == 0 {
			continue
		}

		usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector}
		stats, err := evaluator.UsageStats(usageStatsOptions)
		if err != nil {
			// remember the error
			errors = append(errors, err)
			// exclude resources which encountered calculation errors
			matchedResources = Difference(matchedResources, intersection)
			continue
		}
		newUsage = Add(newUsage, stats.Used)
	}

	// mask the observed usage to only the set of resources tracked by this quota
	// merge our observed usage with the quota usage status
	// if the new usage is different than the last usage, we will need to do an update
	newUsage = Mask(newUsage, matchedResources)
	return newUsage, utilerrors.NewAggregate(errors)
}

// ---- file: kube/plugin/pkg/admission/resourcequota/admission.go ----

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apiserver/pkg/admission"
	genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
	resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

// QuotaAdmission implements an admission controller that can enforce quota constraints
type QuotaAdmission struct {
	*admission.Handler
	// config lists the resources that are limited by default.
	config *resourcequotaapi.Configuration
	stopCh <-chan struct{}
	// quotaConfiguration knows which resources to ignore and how to evaluate usage.
	quotaConfiguration quota.Configuration
	// numEvaluators is the number of worker goroutines the evaluator runs.
	numEvaluators int
	quotaAccessor *quotaAccessor
	evaluator     Evaluator
}

// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it.
type WantsQuotaConfiguration interface {
	SetQuotaConfiguration(quota.Configuration)
	admission.InitializationValidator
}

// compile-time interface conformance checks
var _ admission.ValidationInterface = &QuotaAdmission{}
var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&QuotaAdmission{})
var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&QuotaAdmission{})
var _ = WantsQuotaConfiguration(&QuotaAdmission{})

// liveLookupEntry is a cached live-lookup result with an expiry time.
type liveLookupEntry struct {
	expiry time.Time
	items  []*corev1.ResourceQuota
}

// NewResourceQuota configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must have the capability to handle group/kinds that
// are persisted by the server this admission controller is intercepting
func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, stopCh <-chan struct{}) (*QuotaAdmission, error) {
	quotaAccessor, err := newQuotaAccessor()
	if err != nil {
		return nil, err
	}

	return &QuotaAdmission{
		Handler:       admission.NewHandler(admission.Create, admission.Update),
		stopCh:        stopCh,
		numEvaluators: numEvaluators,
		config:        config,
		quotaAccessor: quotaAccessor,
	}, nil
}

// SetExternalKubeClientSet registers the client into QuotaAdmission
func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) {
	a.quotaAccessor.client = client
}

// SetExternalKubeInformerFactory registers an informer factory into QuotaAdmission
func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
	a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister()
}

// SetQuotaConfiguration assigns and initializes configuration and evaluator for QuotaAdmission
func (a *QuotaAdmission) SetQuotaConfiguration(c quota.Configuration) {
	a.quotaConfiguration = c
	a.evaluator = NewQuotaEvaluator(a.quotaAccessor, a.quotaConfiguration.IgnoredResources(), generic.NewRegistry(a.quotaConfiguration.Evaluators()), nil, a.config, a.numEvaluators, a.stopCh)
}

// ValidateInitialization ensures all required dependencies have been injected.
func (a *QuotaAdmission) ValidateInitialization() error {
	if a.quotaAccessor == nil {
		return fmt.Errorf("missing quotaAccessor")
	}
	if a.quotaAccessor.client == nil {
		return fmt.Errorf("missing quotaAccessor.client")
	}
	if a.quotaAccessor.lister == nil {
		return fmt.Errorf("missing quotaAccessor.lister")
	}
	if a.quotaConfiguration == nil {
		return fmt.Errorf("missing quotaConfiguration")
	}
	if a.evaluator == nil {
		return fmt.Errorf("missing evaluator")
	}
	return nil
}

// Validate makes admission decisions while enforcing quota
func (a *QuotaAdmission) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if attr.GetSubresource() != "" {
		return nil
	}
	// ignore all operations that are not namespaced
	if attr.GetNamespace() == "" {
		return nil
	}
	return a.evaluator.Evaluate(attr)
}

// ---- file: kube/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go ----

/*

 Copyright 2021 The KubeSphere Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

package resourcequota

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Configuration provides configuration for the ResourceQuota admission controller.
type Configuration struct {
	metav1.TypeMeta

	// LimitedResources whose consumption is limited by default.
	// +optional
	LimitedResources []LimitedResource
}

// LimitedResource matches a resource whose consumption is limited by default.
// To consume the resource, there must exist an associated quota that limits
// its consumption.
type LimitedResource struct {

	// APIGroup is the name of the APIGroup that contains the limited resource.
	// +optional
	APIGroup string `json:"apiGroup,omitempty"`

	// Resource is the name of the resource this rule applies to.
	// For example, if the administrator wants to limit consumption
	// of a storage resource associated with persistent volume claims,
	// the value would be "persistentvolumeclaims".
	Resource string `json:"resource"`

	// For each intercepted request, the quota system will evaluate
	// its resource usage. It will iterate through each resource consumed
	// and if the resource contains any substring in this listing, the
	// quota system will ensure that there is a covering quota. In the
	// absence of a covering quota, the quota system will deny the request.
	// For example, if an administrator wants to globally enforce
	// that a quota must exist to consume persistent volume claims associated
	// with any storage class, the list would include
	// ".storageclass.storage.k8s.io/requests.storage"
	MatchContains []string

	// For each intercepted request, the quota system will figure out if the input object
	// satisfies a scope which is present in this listing, then the
	// quota system will ensure that there is a covering quota. In the
	// absence of a covering quota, the quota system will deny the request.
	// For example, if an administrator wants to globally enforce that
	// a quota must exist to create a pod with "cluster-services" priorityclass
	// the list would include
	// "PriorityClassNameIn=cluster-services"
	// +optional
	MatchScopes []corev1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
}

// ---- file: kube/plugin/pkg/admission/resourcequota/controller.go ----

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"k8s.io/klog"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/client-go/util/workqueue"
	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
	resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

// Evaluator is used to see if quota constraints are satisfied.
type Evaluator interface {
	// Evaluate takes an operation and checks to see if quota constraints are satisfied. It returns an error if they are not.
	// The default implementation processes related operations in chunks when possible.
	Evaluate(a admission.Attributes) error
}

type quotaEvaluator struct {
	quotaAccessor QuotaAccessor
	// lockAcquisitionFunc acquires any required locks and returns a cleanup method to defer
	lockAcquisitionFunc func([]corev1.ResourceQuota) func()

	ignoredResources map[schema.GroupResource]struct{}

	// registry that knows how to measure usage for objects
	registry quota.Registry

	// TODO these are used together to bucket items by namespace and then batch them up for processing.
	// The technique is valuable for rollup activities to avoid fanout and reduce resource contention.
	// We could move this into a library if another component needed it.
	// queue is indexed by namespace, so that we bundle up on a per-namespace basis
	queue      *workqueue.Type
	workLock   sync.Mutex
	work       map[string][]*admissionWaiter
	dirtyWork  map[string][]*admissionWaiter
	inProgress sets.String

	// controls the run method so that we can cleanly conform to the Evaluator interface
	workers int
	stopCh  <-chan struct{}
	init    sync.Once

	// lets us know what resources are limited by default
	config *resourcequotaapi.Configuration
}

// admissionWaiter tracks one pending admission request until a decision is made.
type admissionWaiter struct {
	attributes admission.Attributes
	// finished is closed once result holds the final decision.
	finished chan struct{}
	result   error
}

// defaultDeny is the sentinel result meaning "no decision made yet"; requests
// fail closed unless some step explicitly clears it.
type defaultDeny struct{}

func (defaultDeny) Error() string {
	return "DEFAULT DENY"
}

// IsDefaultDeny returns true if the error is defaultDeny
func IsDefaultDeny(err error) bool {
	if err == nil {
		return false
	}

	_, ok := err.(defaultDeny)
	return ok
}

// newAdmissionWaiter returns a waiter whose result starts as defaultDeny.
func newAdmissionWaiter(a admission.Attributes) *admissionWaiter {
	return &admissionWaiter{
		attributes: a,
		finished:   make(chan struct{}),
		result:     defaultDeny{},
	}
}

// NewQuotaEvaluator configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must have the capability to handle group/kinds that
// are persisted by the server this admission controller is intercepting
func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]corev1.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator {
	// if we get a nil config, just create an empty default.
	if config == nil {
		config = &resourcequotaapi.Configuration{}
	}

	return &quotaEvaluator{
		quotaAccessor:       quotaAccessor,
		lockAcquisitionFunc: lockAcquisitionFunc,

		ignoredResources: ignoredResources,
		registry:         quotaRegistry,

		queue:      workqueue.NewNamed("admission_quota_controller"),
		work:       map[string][]*admissionWaiter{},
		dirtyWork:  map[string][]*admissionWaiter{},
		inProgress: sets.String{},

		workers: workers,
		stopCh:  stopCh,
		config:  config,
	}
}

// run begins watching and syncing; it blocks until stopCh is closed.
func (e *quotaEvaluator) run() {
	defer utilruntime.HandleCrash()

	for i := 0; i < e.workers; i++ {
		go wait.Until(e.doWork, time.Second, e.stopCh)
	}
	<-e.stopCh
	klog.Infof("Shutting down quota evaluator")
	e.queue.ShutDown()
}

// doWork drains per-namespace batches of waiters until the queue shuts down.
func (e *quotaEvaluator) doWork() {
	workFunc := func() bool {
		ns, admissionAttributes, quit := e.getWork()
		if quit {
			return true
		}
		defer e.completeWork(ns)
		if len(admissionAttributes) == 0 {
			return false
		}
		e.checkAttributes(ns, admissionAttributes)
		return false
	}
	for {
		if quit := workFunc(); quit {
			klog.Infof("quota evaluator worker shutdown")
			return
		}
	}
}

// checkAttributes iterates over and evaluates all the waiting admissionAttributes. It will always notify all waiters
// before returning. The default is to deny.
func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admissionWaiter) {
	// notify all on exit
	defer func() {
		for _, admissionAttribute := range admissionAttributes {
			close(admissionAttribute.finished)
		}
	}()

	quotas, err := e.quotaAccessor.GetQuotas(ns)
	if err != nil {
		for _, admissionAttribute := range admissionAttributes {
			admissionAttribute.result = err
		}
		return
	}
	// if limited resources are disabled, we can just return safely when there are no quotas.
	limitedResourcesDisabled := len(e.config.LimitedResources) == 0
	if len(quotas) == 0 && limitedResourcesDisabled {
		for _, admissionAttribute := range admissionAttributes {
			admissionAttribute.result = nil
		}
		return
	}

	if e.lockAcquisitionFunc != nil {
		releaseLocks := e.lockAcquisitionFunc(quotas)
		defer releaseLocks()
	}

	e.checkQuotas(quotas, admissionAttributes, 3)
}

// checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
// AFTER it has checked all the admissionAttributes. The method breaks down into phases like this:
// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
//    originals
// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
//    and the running quota don't change. If it did fit, check to see if any quota was changed. If there was no quota change
//    mark the waiter as succeeded. If some quota did change, update the running quotas
// 2. If no running quota was changed, return now since no updates are needed.
// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If some
//    updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
//    and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
//    documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded.
func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) {
	// yet another copy to compare against originals to see if we actually have deltas
	originalQuotas, err := copyQuotas(quotas)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}

	atLeastOneChanged := false
	for i := range admissionAttributes {
		admissionAttribute := admissionAttributes[i]
		newQuotas, err := e.checkRequest(quotas, admissionAttribute.attributes)
		if err != nil {
			admissionAttribute.result = err
			continue
		}

		// Don't update quota for admissionAttributes that correspond to dry-run requests
		if admissionAttribute.attributes.IsDryRun() {
			admissionAttribute.result = nil
			continue
		}

		// if the new quotas are the same as the old quotas, then this particular one doesn't issue any updates
		// that means that no quota docs applied, so it can get a pass
		atLeastOneChangeForThisWaiter := false
		for j := range newQuotas {
			if !quota.Equals(quotas[j].Status.Used, newQuotas[j].Status.Used) {
				atLeastOneChanged = true
				atLeastOneChangeForThisWaiter = true
				break
			}
		}

		if !atLeastOneChangeForThisWaiter {
			admissionAttribute.result = nil
		}

		quotas = newQuotas
	}

	// if none of the requests changed anything, there's no reason to issue an update, just fail them all now
	if !atLeastOneChanged {
		return
	}

	// now go through and try to issue updates. Things get a little weird here:
	// 1. check to see if the quota changed. If not, skip.
	// 2. if the quota changed and the update passes, be happy
	// 3. if the quota changed and the update fails, add the original to a retry list
	var updatedFailedQuotas []corev1.ResourceQuota
	var lastErr error
	for i := range quotas {
		newQuota := quotas[i]

		// if this quota didn't have its status changed, skip it
		if quota.Equals(originalQuotas[i].Status.Used, newQuota.Status.Used) {
			continue
		}

		if err := e.quotaAccessor.UpdateQuotaStatus(&newQuota); err != nil {
			updatedFailedQuotas = append(updatedFailedQuotas, newQuota)
			lastErr = err
		}
	}

	if len(updatedFailedQuotas) == 0 {
		// all the updates succeeded. At this point, anything with the default deny error was just waiting to
		// get a successful update, so we can mark and notify
		for _, admissionAttribute := range admissionAttributes {
			if IsDefaultDeny(admissionAttribute.result) {
				admissionAttribute.result = nil
			}
		}
		return
	}

	// at this point, errors are fatal. Update all waiters without status to failed and return
	if remainingRetries <= 0 {
		for _, admissionAttribute := range admissionAttributes {
			if IsDefaultDeny(admissionAttribute.result) {
				admissionAttribute.result = lastErr
			}
		}
		return
	}

	// this retry logic has the same bug that it's possible to be checking against quota in a state that never actually exists where
	// you've added a new document, then updated an old one, your resource matches both and you're only checking one
	// updates for these quota names failed. Get the current quotas in the namespace, compare by name, check to see if the
	// resource versions have changed. If not, we're going to fall through and fail everything. If they all have, then we can try again
	newQuotas, err := e.quotaAccessor.GetQuotas(quotas[0].Namespace)
	if err != nil {
		// this means that updates failed. Anything with a default deny error has failed and we need to let them know
		for _, admissionAttribute := range admissionAttributes {
			if IsDefaultDeny(admissionAttribute.result) {
				admissionAttribute.result = lastErr
			}
		}
		return
	}

	// this logic goes through our cache to find the new version of all quotas that failed update. If something has been removed
	// it is skipped on this retry. After all, you removed it.
	quotasToCheck := []corev1.ResourceQuota{}
	for _, newQuota := range newQuotas {
		for _, oldQuota := range updatedFailedQuotas {
			if newQuota.Name == oldQuota.Name {
				quotasToCheck = append(quotasToCheck, newQuota)
				break
			}
		}
	}
	e.checkQuotas(quotasToCheck, admissionAttributes, remainingRetries-1)
}

// copyQuotas deep-copies the quota slice; the error return is always nil but
// kept so callers share one error-handling shape.
func copyQuotas(in []corev1.ResourceQuota) ([]corev1.ResourceQuota, error) {
	out := make([]corev1.ResourceQuota, 0, len(in))
	for _, quota := range in {
		out = append(out, *quota.DeepCopy())
	}

	return out, nil
}

// filterLimitedResourcesByGroupResource filters the input that match the specified groupResource
func filterLimitedResourcesByGroupResource(input []resourcequotaapi.LimitedResource, groupResource schema.GroupResource) []resourcequotaapi.LimitedResource {
	result := []resourcequotaapi.LimitedResource{}
	for i := range input {
		limitedResource := input[i]
		limitedGroupResource := schema.GroupResource{Group: limitedResource.APIGroup, Resource: limitedResource.Resource}
		if limitedGroupResource == groupResource {
			result = append(result, limitedResource)
		}
	}
	return result
}

// limitedByDefault determines from the specified usage and limitedResources the set of resource names
// that must be present in a covering quota. It returns an empty set if it was unable to determine if
// a resource was not limited by default.
+func limitedByDefault(usage corev1.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []corev1.ResourceName { + result := []corev1.ResourceName{} + for _, limitedResource := range limitedResources { + for k, v := range usage { + // if a resource is consumed, we need to check if it matches on the limited resource list. + if v.Sign() == 1 { + // if we get a match, we add it to limited set + for _, matchContain := range limitedResource.MatchContains { + if strings.Contains(string(k), matchContain) { + result = append(result, k) + break + } + } + } + } + } + return result +} + +func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]corev1.ScopedResourceSelectorRequirement, error) { + scopes := []corev1.ScopedResourceSelectorRequirement{} + for _, limitedResource := range limitedResources { + matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes) + if err != nil { + klog.Errorf("Error while matching limited Scopes: %v", err) + return []corev1.ScopedResourceSelectorRequirement{}, err + } + for _, scope := range matched { + scopes = append(scopes, scope) + } + } + return scopes, nil +} + +// checkRequest verifies that the request does not exceed any quota constraint. it returns a copy of quotas not yet persisted +// that capture what the usage would be if the request succeeded. It return an error if there is insufficient quota to satisfy the request +func (e *quotaEvaluator) checkRequest(quotas []corev1.ResourceQuota, a admission.Attributes) ([]corev1.ResourceQuota, error) { + evaluator := e.registry.Get(a.GetResource().GroupResource()) + if evaluator == nil { + return quotas, nil + } + return CheckRequest(quotas, a, evaluator, e.config.LimitedResources) +} + +// CheckRequest is a static version of quotaEvaluator.checkRequest, possible to be called from outside. 
+func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator, + limited []resourcequotaapi.LimitedResource) ([]corev1.ResourceQuota, error) { + if !evaluator.Handles(a) { + return quotas, nil + } + + // if we have limited resources enabled for this resource, always calculate usage + inputObject := a.GetObject() + + // Check if object matches AdmissionConfiguration matchScopes + limitedScopes, err := getMatchedLimitedScopes(evaluator, inputObject, limited) + if err != nil { + return quotas, nil + } + + // determine the set of resource names that must exist in a covering quota + limitedResourceNames := []corev1.ResourceName{} + limitedResources := filterLimitedResourcesByGroupResource(limited, a.GetResource().GroupResource()) + if len(limitedResources) > 0 { + deltaUsage, err := evaluator.Usage(inputObject) + if err != nil { + return quotas, err + } + limitedResourceNames = limitedByDefault(deltaUsage, limitedResources) + } + limitedResourceNamesSet := quota.ToSet(limitedResourceNames) + + // find the set of quotas that are pertinent to this request + // reject if we match the quota, but usage is not calculated yet + // reject if the input object does not satisfy quota constraints + // if there are no pertinent quotas, we can just return + interestingQuotaIndexes := []int{} + // track the cumulative set of resources that were required across all quotas + // this is needed to know if we have satisfied any constraints where consumption + // was limited by default. 
+ restrictedResourcesSet := sets.String{} + restrictedScopes := []corev1.ScopedResourceSelectorRequirement{} + for i := range quotas { + resourceQuota := quotas[i] + scopeSelectors := getScopeSelectorsFromQuota(resourceQuota) + localRestrictedScopes, err := evaluator.MatchingScopes(inputObject, scopeSelectors) + if err != nil { + return nil, fmt.Errorf("error matching scopes of quota %s, err: %v", resourceQuota.Name, err) + } + for _, scope := range localRestrictedScopes { + restrictedScopes = append(restrictedScopes, scope) + } + + match, err := evaluator.Matches(&resourceQuota, inputObject) + if err != nil { + klog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err) + return quotas, err + } + if !match { + continue + } + + hardResources := quota.ResourceNames(resourceQuota.Status.Hard) + restrictedResources := evaluator.MatchingResources(hardResources) + if err := evaluator.Constraints(restrictedResources, inputObject); err != nil { + return nil, admission.NewForbidden(a, fmt.Errorf("failed quota: %s: %v", resourceQuota.Name, err)) + } + if !hasUsageStats(&resourceQuota, restrictedResources) { + return nil, admission.NewForbidden(a, fmt.Errorf("status unknown for quota: %s, resources: %s", resourceQuota.Name, prettyPrintResourceNames(restrictedResources))) + } + interestingQuotaIndexes = append(interestingQuotaIndexes, i) + localRestrictedResourcesSet := quota.ToSet(restrictedResources) + restrictedResourcesSet.Insert(localRestrictedResourcesSet.List()...) + } + + // Usage of some resources cannot be counted in isolation. For example, when + // the resource represents a number of unique references to external + // resource. In such a case an evaluator needs to process other objects in + // the same namespace which needs to be known. 
+ namespace := a.GetNamespace() + if accessor, err := meta.Accessor(inputObject); namespace != "" && err == nil { + if accessor.GetNamespace() == "" { + accessor.SetNamespace(namespace) + } + } + // there is at least one quota that definitely matches our object + // as a result, we need to measure the usage of this object for quota + // on updates, we need to subtract the previous measured usage + // if usage shows no change, just return since it has no impact on quota + deltaUsage, err := evaluator.Usage(inputObject) + if err != nil { + return quotas, err + } + + // ensure that usage for input object is never negative (this would mean a resource made a negative resource requirement) + if negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 { + return nil, admission.NewForbidden(a, fmt.Errorf("quota usage is negative for resource(s): %s", prettyPrintResourceNames(negativeUsage))) + } + + if admission.Update == a.GetOperation() { + prevItem := a.GetOldObject() + if prevItem == nil { + return nil, admission.NewForbidden(a, fmt.Errorf("unable to get previous usage since prior version of object was not found")) + } + + // if we can definitively determine that this is not a case of "create on update", + // then charge based on the delta. Otherwise, bill the maximum + metadata, err := meta.Accessor(prevItem) + if err == nil && len(metadata.GetResourceVersion()) > 0 { + prevUsage, innerErr := evaluator.Usage(prevItem) + if innerErr != nil { + return quotas, innerErr + } + deltaUsage = quota.SubtractWithNonNegativeResult(deltaUsage, prevUsage) + } + } + + if quota.IsZero(deltaUsage) { + return quotas, nil + } + + // verify that for every resource that had limited by default consumption + // enabled that there was a corresponding quota that covered its use. + // if not, we reject the request. 
+ hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet) + if len(hasNoCoveringQuota) > 0 { + return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ","))) + } + + // verify that for every scope that had limited access enabled + // that there was a corresponding quota that covered it. + // if not, we reject the request. + scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes) + if err != nil { + return quotas, err + } + if len(scopesHasNoCoveringQuota) > 0 { + return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota) + } + + if len(interestingQuotaIndexes) == 0 { + return quotas, nil + } + + outQuotas, err := copyQuotas(quotas) + if err != nil { + return nil, err + } + + for _, index := range interestingQuotaIndexes { + resourceQuota := outQuotas[index] + + hardResources := quota.ResourceNames(resourceQuota.Status.Hard) + requestedUsage := quota.Mask(deltaUsage, hardResources) + newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage) + maskedNewUsage := quota.Mask(newUsage, quota.ResourceNames(requestedUsage)) + + if allowed, exceeded := quota.LessThanOrEqual(maskedNewUsage, resourceQuota.Status.Hard); !allowed { + failedRequestedUsage := quota.Mask(requestedUsage, exceeded) + failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded) + failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded) + return nil, admission.NewForbidden(a, + fmt.Errorf("exceeded quota: %s, requested: %s, used: %s, limited: %s", + resourceQuota.Name, + prettyPrint(failedRequestedUsage), + prettyPrint(failedUsed), + prettyPrint(failedHard))) + } + + // update to the new usage number + outQuotas[index].Status.Used = newUsage + } + + return outQuotas, nil +} + +func getScopeSelectorsFromQuota(quota corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := 
[]corev1.ScopedResourceSelectorRequirement{} + for _, scope := range quota.Spec.Scopes { + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ + ScopeName: scope, + Operator: corev1.ScopeSelectorOpExists}) + } + if quota.Spec.ScopeSelector != nil { + for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions { + selectors = append(selectors, scopeSelector) + } + } + return selectors +} + +func (e *quotaEvaluator) Evaluate(a admission.Attributes) error { + e.init.Do(func() { + go e.run() + }) + + // is this resource ignored? + gvr := a.GetResource() + gr := gvr.GroupResource() + if _, ok := e.ignoredResources[gr]; ok { + return nil + } + + // if we do not know how to evaluate use for this resource, create an evaluator + evaluator := e.registry.Get(gr) + if evaluator == nil { + // create an object count evaluator if no evaluator previously registered + // note, we do not need aggregate usage here, so we pass a nil informer func + evaluator = generic.NewObjectCountEvaluator(gr, nil, "") + e.registry.Add(evaluator) + klog.Infof("quota admission added evaluator for: %s", gr) + } + // for this kind, check if the operation could mutate any quota resources + // if no resources tracked by quota are impacted, then just return + if !evaluator.Handles(a) { + return nil + } + waiter := newAdmissionWaiter(a) + + e.addWork(waiter) + + // wait for completion or timeout + select { + case <-waiter.finished: + case <-time.After(10 * time.Second): + return apierrors.NewInternalError(fmt.Errorf("resource quota evaluates timeout")) + } + + return waiter.result +} + +func (e *quotaEvaluator) addWork(a *admissionWaiter) { + e.workLock.Lock() + defer e.workLock.Unlock() + + ns := a.attributes.GetNamespace() + // this Add can trigger a Get BEFORE the work is added to a list, but this is ok because the getWork routine + // waits the worklock before retrieving the work to do, so the writes in this method will be observed + e.queue.Add(ns) + + if 
e.inProgress.Has(ns) { + e.dirtyWork[ns] = append(e.dirtyWork[ns], a) + return + } + + e.work[ns] = append(e.work[ns], a) +} + +func (e *quotaEvaluator) completeWork(ns string) { + e.workLock.Lock() + defer e.workLock.Unlock() + + e.queue.Done(ns) + e.work[ns] = e.dirtyWork[ns] + delete(e.dirtyWork, ns) + e.inProgress.Delete(ns) +} + +// getWork returns a namespace, a list of work items in that +// namespace, and a shutdown boolean. If not shutdown then the return +// must eventually be followed by a call on completeWork for the +// returned namespace (regardless of whether the work item list is +// empty). +func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) { + uncastNS, shutdown := e.queue.Get() + if shutdown { + return "", []*admissionWaiter{}, shutdown + } + ns := uncastNS.(string) + + e.workLock.Lock() + defer e.workLock.Unlock() + // at this point, we know we have a coherent view of e.work. It is entirely possible + // that our workqueue has another item requeued to it, but we'll pick it up early. 
This ok + // because the next time will go into our dirty list + + work := e.work[ns] + delete(e.work, ns) + delete(e.dirtyWork, ns) + e.inProgress.Insert(ns) + return ns, work, false +} + +// prettyPrint formats a resource list for usage in errors +// it outputs resources sorted in increasing order +func prettyPrint(item corev1.ResourceList) string { + parts := []string{} + keys := []string{} + for key := range item { + keys = append(keys, string(key)) + } + sort.Strings(keys) + for _, key := range keys { + value := item[corev1.ResourceName(key)] + constraint := key + "=" + value.String() + parts = append(parts, constraint) + } + return strings.Join(parts, ",") +} + +func prettyPrintResourceNames(a []corev1.ResourceName) string { + values := []string{} + for _, value := range a { + values = append(values, string(value)) + } + sort.Strings(values) + return strings.Join(values, ",") +} + +// hasUsageStats returns true if for each hard constraint in interestingResources there is a value for its current usage +func hasUsageStats(resourceQuota *corev1.ResourceQuota, interestingResources []corev1.ResourceName) bool { + interestingSet := quota.ToSet(interestingResources) + for resourceName := range resourceQuota.Status.Hard { + if !interestingSet.Has(string(resourceName)) { + continue + } + if _, found := resourceQuota.Status.Used[resourceName]; !found { + return false + } + } + return true +} diff --git a/kube/plugin/pkg/admission/resourcequota/resource_access.go b/kube/plugin/pkg/admission/resourcequota/resource_access.go new file mode 100644 index 000000000..4297f7533 --- /dev/null +++ b/kube/plugin/pkg/admission/resourcequota/resource_access.go @@ -0,0 +1,155 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcequota + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/golang-lru" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/storage/etcd3" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// QuotaAccessor abstracts the get/set logic from the rest of the Evaluator. This could be a test stub, a straight passthrough, +// or most commonly a series of deconflicting caches. +type QuotaAccessor interface { + // UpdateQuotaStatus is called to persist final status. This method should write to persistent storage. + // An error indicates that write didn't complete successfully. + UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error + + // GetQuotas gets all possible quotas for a given namespace + GetQuotas(namespace string) ([]corev1.ResourceQuota, error) +} + +type quotaAccessor struct { + client kubernetes.Interface + + // lister can list/get quota objects from a shared informer's cache + lister corev1listers.ResourceQuotaLister + + // liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures. + // This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. + // We track the lookup result here so that for repeated requests, we don't look it up very often. + liveLookupCache *lru.Cache + liveTTL time.Duration + // updatedQuotas holds a cache of quotas that we've updated.
This is used to pull the "really latest" during back to + back quota evaluations that touch the same quota doc. This only works because we can compare etcd resourceVersions + for the same resource as integers. Before this change: 22 updates with 12 conflicts. after this change: 15 updates with 0 conflicts + updatedQuotas *lru.Cache +} + +// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects. +func newQuotaAccessor() (*quotaAccessor, error) { + liveLookupCache, err := lru.New(100) + if err != nil { + return nil, err + } + updatedCache, err := lru.New(100) + if err != nil { + return nil, err + } + + // client and lister will be set when SetInternalKubeClientSet and SetInternalKubeInformerFactory are invoked + return &quotaAccessor{ + liveLookupCache: liveLookupCache, + liveTTL: time.Duration(30 * time.Second), + updatedQuotas: updatedCache, + }, nil +} + +func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { + updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(context.TODO(),newQuota,metav1.UpdateOptions{}) + if err != nil { + return err + } + + key := newQuota.Namespace + "/" + newQuota.Name + e.updatedQuotas.Add(key, updatedQuota) + return nil +} + +var etcdVersioner = etcd3.APIObjectVersioner{} + +// checkCache compares the passed quota against the value in the look-aside cache and returns the newer +// if the cache is out of date, it deletes the stale entry.
This only works because of etcd resourceVersions +// being monotonically increasing integers +func (e *quotaAccessor) checkCache(quota *corev1.ResourceQuota) *corev1.ResourceQuota { + key := quota.Namespace + "/" + quota.Name + uncastCachedQuota, ok := e.updatedQuotas.Get(key) + if !ok { + return quota + } + cachedQuota := uncastCachedQuota.(*corev1.ResourceQuota) + + if etcdVersioner.CompareResourceVersion(quota, cachedQuota) >= 0 { + e.updatedQuotas.Remove(key) + return quota + } + return cachedQuota +} + +func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) { + // determine if there are any quotas in this namespace + // if there are no quotas, we don't need to do anything + items, err := e.lister.ResourceQuotas(namespace).List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("error resolving quota: %v", err) + } + + // if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it. + if len(items) == 0 { + lruItemObj, ok := e.liveLookupCache.Get(namespace) + if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) { + // TODO: If there are multiple operations at the same time and cache has just expired, + // this may cause multiple List operations being issued at the same time. + // If there is already in-flight List() for a given namespace, we should wait until + // it is finished and cache is updated instead of doing the same, also to avoid + // throttling - see #22422 for details. 
+ liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(context.TODO(),metav1.ListOptions{}) + if err != nil { + return nil, err + } + newEntry := liveLookupEntry{expiry: time.Now().Add(e.liveTTL)} + for i := range liveList.Items { + newEntry.items = append(newEntry.items, &liveList.Items[i]) + } + e.liveLookupCache.Add(namespace, newEntry) + lruItemObj = newEntry + } + lruEntry := lruItemObj.(liveLookupEntry) + for i := range lruEntry.items { + items = append(items, lruEntry.items[i]) + } + } + + resourceQuotas := []corev1.ResourceQuota{} + for i := range items { + quota := items[i] + quota = e.checkCache(quota) + // always make a copy. We're going to muck around with this and we should never mutate the originals + resourceQuotas = append(resourceQuotas, *quota) + } + + return resourceQuotas, nil +} diff --git a/pkg/apis/addtoscheme_quota_v1alpha2.go b/pkg/apis/addtoscheme_quota_v1alpha2.go new file mode 100644 index 000000000..fd3c3fd41 --- /dev/null +++ b/pkg/apis/addtoscheme_quota_v1alpha2.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apis + +import ( + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, quotav1alpha2.SchemeBuilder.AddToScheme) +} diff --git a/pkg/apis/quota/group.go b/pkg/apis/quota/group.go new file mode 100644 index 000000000..0d868898f --- /dev/null +++ b/pkg/apis/quota/group.go @@ -0,0 +1,18 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package quota contains quota API versions +package quota diff --git a/pkg/apis/quota/v1alpha2/doc.go b/pkg/apis/quota/v1alpha2/doc.go new file mode 100644 index 000000000..ca399d3b8 --- /dev/null +++ b/pkg/apis/quota/v1alpha2/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha2 contains API Schema definitions for the quotas v1alpha2 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/quota +// +k8s:defaulter-gen=TypeMeta +// +groupName=quota.kubesphere.io +package v1alpha2 diff --git a/pkg/apis/quota/v1alpha2/register.go b/pkg/apis/quota/v1alpha2/register.go new file mode 100644 index 000000000..d9dd4ce33 --- /dev/null +++ b/pkg/apis/quota/v1alpha2/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha2 contains API Schema definitions for the quotas v1alpha2 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/quota +// +k8s:defaulter-gen=TypeMeta +// +groupName=quota.kubesphere.io +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "quota.kubesphere.io", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... 
+ AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/pkg/apis/quota/v1alpha2/types.go b/pkg/apis/quota/v1alpha2/types.go new file mode 100644 index 000000000..2b34c590f --- /dev/null +++ b/pkg/apis/quota/v1alpha2/types.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ResourceKindCluster = "ResourceQuota" + ResourcesSingularCluster = "resourcequota" + ResourcesPluralCluster = "resourcequotas" +) + +func init() { + SchemeBuilder.Register(&ResourceQuota{}, &ResourceQuotaList{}) +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true + +// WorkspaceResourceQuota sets aggregate quota restrictions enforced per workspace +// +kubebuilder:resource:categories="quota",scope="Cluster" +// +kubebuilder:subresource:status +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota + Spec ResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceQuotaSpec defines the desired quota restrictions +type ResourceQuotaSpec struct { + // LabelSelector is used to select projects by label. + LabelSelector map[string]string `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // Quota defines the desired quota + Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` +} + +// ResourceQuotaStatus defines the actual enforced quota and its current usage +type ResourceQuotaStatus struct { + // Total defines the actual enforced quota and its current usage across all projects + Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` + + // Namespaces slices the usage by project. + Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace +type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace + +// ResourceQuotaStatusByNamespace gives status for a particular project +type ResourceQuotaStatusByNamespace struct { + corev1.ResourceQuotaStatus `json:",inline"` + + // Namespace the project this status applies to + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of WorkspaceResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of WorkspaceResourceQuota objects. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/pkg/apis/quota/v1alpha2/types_test.go b/pkg/apis/quota/v1alpha2/types_test.go new file mode 100644 index 000000000..356957c18 --- /dev/null +++ b/pkg/apis/quota/v1alpha2/types_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +import ( + "testing" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestStorageResourceQuota(t *testing.T) { + key := types.NamespacedName{ + Name: "foo", + } + created := &ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: ResourceQuotaSpec{ + LabelSelector: map[string]string{}, + }, + } + g := gomega.NewGomegaWithT(t) + + // Test Create + fetched := &ResourceQuota{ + Spec: ResourceQuotaSpec{ + LabelSelector: map[string]string{}, + }, + } + g.Expect(c.Create(context.TODO(), created)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(created)) + + // Test Updating the Labels + updated := fetched.DeepCopy() + updated.Labels = map[string]string{"hello": "world"} + g.Expect(c.Update(context.TODO(), updated)).To(gomega.Succeed()) + + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed()) + g.Expect(fetched).To(gomega.Equal(updated)) + + // Test Delete + g.Expect(c.Delete(context.TODO(), fetched)).To(gomega.Succeed()) + g.Expect(c.Get(context.TODO(), key, fetched)).ToNot(gomega.Succeed()) +} diff --git a/pkg/apis/quota/v1alpha2/v1alpha2_suite_test.go b/pkg/apis/quota/v1alpha2/v1alpha2_suite_test.go new file mode 100644 index 000000000..717d9ce14 --- /dev/null +++ b/pkg/apis/quota/v1alpha2/v1alpha2_suite_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "log" + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +var cfg *rest.Config +var c client.Client + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")}, + } + + err := SchemeBuilder.AddToScheme(scheme.Scheme) + if err != nil { + log.Fatal(err) + } + + if cfg, err = t.Start(); err != nil { + log.Fatal(err) + } + + if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil { + log.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} diff --git a/pkg/apis/quota/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/quota/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..5be7a8ab7 --- /dev/null +++ b/pkg/apis/quota/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,167 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. +func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Quota.DeepCopyInto(&out.Quota) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + in.Total.DeepCopyInto(&out.Total) + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) { + *out = *in + in.ResourceQuotaStatus.DeepCopyInto(&out.ResourceQuotaStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace. 
+func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotaStatusByNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) { + { + in := &in + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace. +func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotasStatusByNamespace) + in.DeepCopyInto(out) + return *out +} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index d06040f09..6537fe1df 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -30,6 +30,7 @@ import ( devopsv1alpha3 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha3" iamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" storagev1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/storage/v1alpha1" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" @@ -45,6 +46,7 @@ type Interface interface { DevopsV1alpha3() devopsv1alpha3.DevopsV1alpha3Interface IamV1alpha2() iamv1alpha2.IamV1alpha2Interface NetworkV1alpha1() 
networkv1alpha1.NetworkV1alpha1Interface + QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface @@ -62,6 +64,7 @@ type Clientset struct { devopsV1alpha3 *devopsv1alpha3.DevopsV1alpha3Client iamV1alpha2 *iamv1alpha2.IamV1alpha2Client networkV1alpha1 *networkv1alpha1.NetworkV1alpha1Client + quotaV1alpha2 *quotav1alpha2.QuotaV1alpha2Client servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client tenantV1alpha1 *tenantv1alpha1.TenantV1alpha1Client @@ -99,6 +102,11 @@ func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface { return c.networkV1alpha1 } +// QuotaV1alpha2 retrieves the QuotaV1alpha2Client +func (c *Clientset) QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface { + return c.quotaV1alpha2 +} + // ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface { return c.servicemeshV1alpha2 @@ -169,6 +177,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.quotaV1alpha2, err = quotav1alpha2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.servicemeshV1alpha2, err = servicemeshv1alpha2.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -207,6 +219,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.devopsV1alpha3 = devopsv1alpha3.NewForConfigOrDie(c) cs.iamV1alpha2 = iamv1alpha2.NewForConfigOrDie(c) cs.networkV1alpha1 = networkv1alpha1.NewForConfigOrDie(c) + cs.quotaV1alpha2 = quotav1alpha2.NewForConfigOrDie(c) cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c) cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) cs.tenantV1alpha1 = tenantv1alpha1.NewForConfigOrDie(c) @@ -226,6 +239,7 
@@ func New(c rest.Interface) *Clientset { cs.devopsV1alpha3 = devopsv1alpha3.New(c) cs.iamV1alpha2 = iamv1alpha2.New(c) cs.networkV1alpha1 = networkv1alpha1.New(c) + cs.quotaV1alpha2 = quotav1alpha2.New(c) cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) cs.tenantV1alpha1 = tenantv1alpha1.New(c) diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 8a8f43774..b13a4bd00 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -37,6 +37,8 @@ import ( fakeiamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake" networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1" fakenetworkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1/fake" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2" + fakequotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" fakeservicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake" storagev1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/storage/v1alpha1" @@ -126,6 +128,11 @@ func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface { return &fakenetworkv1alpha1.FakeNetworkV1alpha1{Fake: &c.Fake} } +// QuotaV1alpha2 retrieves the QuotaV1alpha2Client +func (c *Clientset) QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface { + return &fakequotav1alpha2.FakeQuotaV1alpha2{Fake: &c.Fake} +} + // ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface 
{ return &fakeservicemeshv1alpha2.FakeServicemeshV1alpha2{Fake: &c.Fake} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index f749e88d6..eb9569f56 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -30,6 +30,7 @@ import ( devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" @@ -47,6 +48,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ devopsv1alpha3.AddToScheme, iamv1alpha2.AddToScheme, networkv1alpha1.AddToScheme, + quotav1alpha2.AddToScheme, servicemeshv1alpha2.AddToScheme, storagev1alpha1.AddToScheme, tenantv1alpha1.AddToScheme, diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index c0b5d6837..28002f8f3 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -30,6 +30,7 @@ import ( devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" @@ -47,6 +48,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ devopsv1alpha3.AddToScheme, iamv1alpha2.AddToScheme, 
networkv1alpha1.AddToScheme, + quotav1alpha2.AddToScheme, servicemeshv1alpha2.AddToScheme, storagev1alpha1.AddToScheme, tenantv1alpha1.AddToScheme, diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/doc.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/doc.go new file mode 100644 index 000000000..f6328297b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/doc.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/doc.go new file mode 100644 index 000000000..7e36dbca8 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_quota_client.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_quota_client.go new file mode 100644 index 000000000..e6b0350f9 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_quota_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2" +) + +type FakeQuotaV1alpha2 struct { + *testing.Fake +} + +func (c *FakeQuotaV1alpha2) ResourceQuotas() v1alpha2.ResourceQuotaInterface { + return &FakeResourceQuotas{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeQuotaV1alpha2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_resourcequota.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_resourcequota.go new file mode 100644 index 000000000..9e5f91b8d --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake/fake_resourcequota.go @@ -0,0 +1,133 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" +) + +// FakeResourceQuotas implements ResourceQuotaInterface +type FakeResourceQuotas struct { + Fake *FakeQuotaV1alpha2 +} + +var resourcequotasResource = schema.GroupVersionResource{Group: "quota.kubesphere.io", Version: "v1alpha2", Resource: "resourcequotas"} + +var resourcequotasKind = schema.GroupVersionKind{Group: "quota.kubesphere.io", Version: "v1alpha2", Kind: "ResourceQuota"} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. 
+func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(resourcequotasResource, name), &v1alpha2.ResourceQuota{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ResourceQuota), err +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceQuotaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(resourcequotasResource, resourcequotasKind, opts), &v1alpha2.ResourceQuotaList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.ResourceQuotaList{ListMeta: obj.(*v1alpha2.ResourceQuotaList).ListMeta} + for _, item := range obj.(*v1alpha2.ResourceQuotaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *FakeResourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(resourcequotasResource, opts)) +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (result *v1alpha2.ResourceQuota, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(resourcequotasResource, resourceQuota), &v1alpha2.ResourceQuota{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ResourceQuota), err +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(resourcequotasResource, resourceQuota), &v1alpha2.ResourceQuota{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ResourceQuota), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(resourcequotasResource, "status", resourceQuota), &v1alpha2.ResourceQuota{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ResourceQuota), err +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(resourcequotasResource, name), &v1alpha2.ResourceQuota{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(resourcequotasResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha2.ResourceQuotaList{}) + return err +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(resourcequotasResource, name, pt, data, subresources...), &v1alpha2.ResourceQuota{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ResourceQuota), err +} diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/generated_expansion.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/generated_expansion.go new file mode 100644 index 000000000..9dd00cbe2 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha2 + +type ResourceQuotaExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/quota_client.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/quota_client.go new file mode 100644 index 000000000..1f4cb15c6 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/quota_client.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + rest "k8s.io/client-go/rest" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +type QuotaV1alpha2Interface interface { + RESTClient() rest.Interface + ResourceQuotasGetter +} + +// QuotaV1alpha2Client is used to interact with features provided by the quota.kubesphere.io group. +type QuotaV1alpha2Client struct { + restClient rest.Interface +} + +func (c *QuotaV1alpha2Client) ResourceQuotas() ResourceQuotaInterface { + return newResourceQuotas(c) +} + +// NewForConfig creates a new QuotaV1alpha2Client for the given config. 
+func NewForConfig(c *rest.Config) (*QuotaV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &QuotaV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new QuotaV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *QuotaV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new QuotaV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *QuotaV1alpha2Client { + return &QuotaV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *QuotaV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha2/resourcequota.go b/pkg/client/clientset/versioned/typed/quota/v1alpha2/resourcequota.go new file mode 100644 index 000000000..7a631d405 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha2/resourcequota.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas() ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. 
+type ResourceQuotaInterface interface { + Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (*v1alpha2.ResourceQuota, error) + Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error) + UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceQuota, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceQuotaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client rest.Interface +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *QuotaV1alpha2Client) *resourceQuotas { + return &resourceQuotas{ + client: c.RESTClient(), + } +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceQuota, err error) { + result = &v1alpha2.ResourceQuota{} + err = c.client.Get(). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. 
+func (c *resourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ResourceQuotaList{} + err = c.client.Get(). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (result *v1alpha2.ResourceQuota, err error) { + result = &v1alpha2.ResourceQuota{} + err = c.client.Post(). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceQuota). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) { + result = &v1alpha2.ResourceQuota{} + err = c.client.Put(). + Resource("resourcequotas"). + Name(resourceQuota.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceQuota). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) { + result = &v1alpha2.ResourceQuota{} + err = c.client.Put(). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceQuota). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("resourcequotas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("resourcequotas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error) { + result = &v1alpha2.ResourceQuota{} + err = c.client.Patch(pt). + Resource("resourcequotas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index dc54b8950..89e3269e6 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -34,6 +34,7 @@ import ( iam "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam" internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" network "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network" + quota "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota" servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh" storage "kubesphere.io/kubesphere/pkg/client/informers/externalversions/storage" tenant "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant" @@ -185,6 +186,7 @@ type SharedInformerFactory interface { Devops() devops.Interface Iam() iam.Interface Network() network.Interface + Quota() quota.Interface Servicemesh() servicemesh.Interface Storage() storage.Interface Tenant() tenant.Interface @@ -211,6 +213,10 @@ func (f *sharedInformerFactory) Network() network.Interface { return network.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Quota() quota.Interface { + return quota.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Servicemesh() servicemesh.Interface { return servicemesh.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 9749baab1..02989bf5a 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -29,6 +29,7 @@ import ( v1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3" v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 
"kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" @@ -118,6 +119,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkv1alpha1.SchemeGroupVersion.WithResource("namespacenetworkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1alpha1().NamespaceNetworkPolicies().Informer()}, nil + // Group=quota.kubesphere.io, Version=v1alpha2 + case quotav1alpha2.SchemeGroupVersion.WithResource("resourcequotas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Quota().V1alpha2().ResourceQuotas().Informer()}, nil + // Group=servicemesh.kubesphere.io, Version=v1alpha2 case servicemeshv1alpha2.SchemeGroupVersion.WithResource("servicepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().ServicePolicies().Informer()}, nil diff --git a/pkg/client/informers/externalversions/quota/interface.go b/pkg/client/informers/externalversions/quota/interface.go new file mode 100644 index 000000000..2c73c5c0d --- /dev/null +++ b/pkg/client/informers/externalversions/quota/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package quota + +import ( + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota/v1alpha2" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/quota/v1alpha2/interface.go b/pkg/client/informers/externalversions/quota/v1alpha2/interface.go new file mode 100644 index 000000000..065e5d00b --- /dev/null +++ b/pkg/client/informers/externalversions/quota/v1alpha2/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ResourceQuotas returns a ResourceQuotaInformer. + ResourceQuotas() ResourceQuotaInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ResourceQuotas returns a ResourceQuotaInformer. +func (v *version) ResourceQuotas() ResourceQuotaInformer { + return &resourceQuotaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/informers/externalversions/quota/v1alpha2/resourcequota.go b/pkg/client/informers/externalversions/quota/v1alpha2/resourcequota.go new file mode 100644 index 000000000..349e491ba --- /dev/null +++ b/pkg/client/informers/externalversions/quota/v1alpha2/resourcequota.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/listers/quota/v1alpha2" +) + +// ResourceQuotaInformer provides access to a shared informer and lister for +// ResourceQuotas. +type ResourceQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ResourceQuotaLister +} + +type resourceQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.QuotaV1alpha2().ResourceQuotas().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.QuotaV1alpha2().ResourceQuotas().Watch(context.TODO(), options) + }, + }, + &quotav1alpha2.ResourceQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceQuotaInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&quotav1alpha2.ResourceQuota{}, f.defaultInformer) +} + +func (f *resourceQuotaInformer) Lister() v1alpha2.ResourceQuotaLister { + return v1alpha2.NewResourceQuotaLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/quota/v1alpha2/expansion_generated.go b/pkg/client/listers/quota/v1alpha2/expansion_generated.go new file mode 100644 index 000000000..82d4a6d4f --- /dev/null +++ b/pkg/client/listers/quota/v1alpha2/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceQuotaListerExpansion allows custom methods to be added to +// ResourceQuotaLister. +type ResourceQuotaListerExpansion interface{} diff --git a/pkg/client/listers/quota/v1alpha2/resourcequota.go b/pkg/client/listers/quota/v1alpha2/resourcequota.go new file mode 100644 index 000000000..bae73d47f --- /dev/null +++ b/pkg/client/listers/quota/v1alpha2/resourcequota.go @@ -0,0 +1,65 @@ +/* +Copyright 2020 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" +) + +// ResourceQuotaLister helps list ResourceQuotas. +type ResourceQuotaLister interface { + // List lists all ResourceQuotas in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.ResourceQuota, err error) + // Get retrieves the ResourceQuota from the index for a given name. 
+ Get(name string) (*v1alpha2.ResourceQuota, error) + ResourceQuotaListerExpansion +} + +// resourceQuotaLister implements the ResourceQuotaLister interface. +type resourceQuotaLister struct { + indexer cache.Indexer +} + +// NewResourceQuotaLister returns a new ResourceQuotaLister. +func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { + return &resourceQuotaLister{indexer: indexer} +} + +// List lists all ResourceQuotas in the indexer. +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ResourceQuota)) + }) + return ret, err +} + +// Get retrieves the ResourceQuota from the index for a given name. +func (s *resourceQuotaLister) Get(name string) (*v1alpha2.ResourceQuota, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("resourcequota"), name) + } + return obj.(*v1alpha2.ResourceQuota), nil +} diff --git a/pkg/controller/quota/accessor.go b/pkg/controller/quota/accessor.go new file mode 100644 index 000000000..ec28161fe --- /dev/null +++ b/pkg/controller/quota/accessor.go @@ -0,0 +1,206 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package quota + +import ( + "context" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "time" + + lru "github.com/hashicorp/golang-lru" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilwait "k8s.io/apimachinery/pkg/util/wait" + etcd "k8s.io/apiserver/pkg/storage/etcd3" + utilquota "kubesphere.io/kubesphere/kube/pkg/quota/v1" +) + +// Following code copied from github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota + +type accessor struct { + client client.Client + + // updatedResourceQuotas holds a cache of quotas that we've updated. This is used to pull the "really latest" during back to + // back quota evaluations that touch the same quota doc. This only works because we can compare etcd resourceVersions + // for the same resource as integers. Before this change: 22 updates with 12 conflicts. after this change: 15 updates with 0 conflicts + updatedResourceQuotas *lru.Cache +} + +// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects. +func newQuotaAccessor(client client.Client) *accessor { + updatedCache, err := lru.New(100) + if err != nil { + // this should never happen + panic(err) + } + + return &accessor{ + client: client, + updatedResourceQuotas: updatedCache, + } +} + +// UpdateQuotaStatus the newQuota coming in will be incremented from the original. 
The difference between the original +// and the new is the amount to add to the namespace total, but the total status is the used value itself +func (a *accessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { + // skipping namespaced resource quota + if newQuota.APIVersion != quotav1alpha2.SchemeGroupVersion.String() { + klog.V(6).Infof("skipping namespaced resource quota %v %v", newQuota.Namespace, newQuota.Name) + return nil + } + ctx := context.TODO() + resourceQuota := &quotav1alpha2.ResourceQuota{} + err := a.client.Get(ctx, types.NamespacedName{Name: newQuota.Name}, resourceQuota) + if err != nil { + klog.Errorf("failed to fetch resource quota: %s, %v", newQuota.Name, err) + return err + } + resourceQuota = a.checkCache(resourceQuota) + + // re-assign objectmeta + // make a copy + updatedQuota := resourceQuota.DeepCopy() + updatedQuota.ObjectMeta = newQuota.ObjectMeta + updatedQuota.Namespace = "" + + // determine change in usage + usageDiff := utilquota.Subtract(newQuota.Status.Used, updatedQuota.Status.Total.Used) + + // update aggregate usage + updatedQuota.Status.Total.Used = newQuota.Status.Used + + // update per namespace totals + oldNamespaceTotals, _ := getResourceQuotasStatusByNamespace(updatedQuota.Status.Namespaces, newQuota.Namespace) + namespaceTotalCopy := oldNamespaceTotals.DeepCopy() + newNamespaceTotals := *namespaceTotalCopy + newNamespaceTotals.Used = utilquota.Add(oldNamespaceTotals.Used, usageDiff) + insertResourceQuotasStatus(&updatedQuota.Status.Namespaces, quotav1alpha2.ResourceQuotaStatusByNamespace{ + Namespace: newQuota.Namespace, + ResourceQuotaStatus: newNamespaceTotals, + }) + + klog.V(6).Infof("update resource quota: %+v", updatedQuota) + err = a.client.Status().Update(ctx, updatedQuota, &client.UpdateOptions{}) + if err != nil { + klog.Errorf("failed to update resource quota: %v", err) + return err + } + + a.updatedResourceQuotas.Add(resourceQuota.Name, updatedQuota) + return nil +} + +var etcdVersioner = 
etcd.APIObjectVersioner{} + +// checkCache compares the passed quota against the value in the look-aside cache and returns the newer +// if the cache is out of date, it deletes the stale entry. This only works because of etcd resourceVersions +// being monotonically increasing integers +func (a *accessor) checkCache(resourceQuota *quotav1alpha2.ResourceQuota) *quotav1alpha2.ResourceQuota { + uncastCachedQuota, ok := a.updatedResourceQuotas.Get(resourceQuota.Name) + if !ok { + return resourceQuota + } + cachedQuota := uncastCachedQuota.(*quotav1alpha2.ResourceQuota) + + if etcdVersioner.CompareResourceVersion(resourceQuota, cachedQuota) >= 0 { + a.updatedResourceQuotas.Remove(resourceQuota.Name) + return resourceQuota + } + return cachedQuota +} + +func (a *accessor) GetQuotas(namespaceName string) ([]corev1.ResourceQuota, error) { + resourceQuotaNames, err := a.waitForReadyResourceQuotaNames(namespaceName) + if err != nil { + klog.Errorf("failed to fetch resource quota names: %v, %v", namespaceName, err) + return nil, err + } + var result []corev1.ResourceQuota + for _, resourceQuotaName := range resourceQuotaNames { + resourceQuota := &quotav1alpha2.ResourceQuota{} + err = a.client.Get(context.TODO(), types.NamespacedName{Name: resourceQuotaName}, resourceQuota) + if err != nil { + klog.Errorf("failed to fetch resource quota %s: %v", resourceQuotaName, err) + return result, err + } + resourceQuota = a.checkCache(resourceQuota) + + // now convert to a ResourceQuota + convertedQuota := corev1.ResourceQuota{} + convertedQuota.APIVersion = quotav1alpha2.SchemeGroupVersion.String() + convertedQuota.ObjectMeta = resourceQuota.ObjectMeta + convertedQuota.Namespace = namespaceName + convertedQuota.Spec = resourceQuota.Spec.Quota + convertedQuota.Status = resourceQuota.Status.Total + result = append(result, convertedQuota) + } + + // avoid conflicts with namespaced resource quota + namespacedResourceQuotas, err := a.waitForReadyNamespacedResourceQuotas(namespaceName) + if err 
!= nil { + klog.Errorf("failed to fetch namespaced resource quotas: %v, %v", namespaceName, err) + return nil, err + } + for _, resourceQuota := range namespacedResourceQuotas { + resourceQuota.APIVersion = corev1.SchemeGroupVersion.String() + result = append(result, resourceQuota) + } + return result, nil +} + +func (a *accessor) waitForReadyResourceQuotaNames(namespaceName string) ([]string, error) { + ctx := context.TODO() + var resourceQuotaNames []string + var err error + // wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds. + err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) { + resourceQuotaNames, err = resourceQuotaNamesFor(ctx, a.client, namespaceName) + // if we can't find the namespace yet, just wait for the cache to update. Requests to non-existent namespaces + // may hang, but those people are doing something wrong and namespace lifecycle should reject them. + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil + }) + return resourceQuotaNames, err +} + +func (a *accessor) waitForReadyNamespacedResourceQuotas(namespaceName string) ([]corev1.ResourceQuota, error) { + ctx := context.TODO() + var resourceQuotas []corev1.ResourceQuota + var err error + // wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds. 
+ err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) { + resourceQuotaList := &corev1.ResourceQuotaList{} + err = a.client.List(ctx, resourceQuotaList, &client.ListOptions{Namespace: namespaceName}) + if err != nil { + return false, err + } + resourceQuotas = resourceQuotaList.Items + return true, nil + }) + return resourceQuotas, err +} diff --git a/pkg/controller/quota/lockfactory.go b/pkg/controller/quota/lockfactory.go new file mode 100644 index 000000000..7c5af60e6 --- /dev/null +++ b/pkg/controller/quota/lockfactory.go @@ -0,0 +1,59 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package quota + +import ( + "sync" +) + +// Following code copied from github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota +type LockFactory interface { + GetLock(string) sync.Locker +} + +type DefaultLockFactory struct { + lock sync.RWMutex + + locks map[string]sync.Locker +} + +func NewDefaultLockFactory() *DefaultLockFactory { + return &DefaultLockFactory{locks: map[string]sync.Locker{}} +} + +func (f *DefaultLockFactory) GetLock(key string) sync.Locker { + lock, exists := f.getExistingLock(key) + if exists { + return lock + } + + f.lock.Lock() + defer f.lock.Unlock() + lock = &sync.Mutex{} + f.locks[key] = lock + return lock +} + +func (f *DefaultLockFactory) getExistingLock(key string) (sync.Locker, bool) { + f.lock.RLock() + defer f.lock.RUnlock() + + lock, exists := f.locks[key] + return lock, exists +} diff --git a/pkg/controller/quota/resourcequota_controller.go b/pkg/controller/quota/resourcequota_controller.go new file mode 100644 index 000000000..5593e07a4 --- /dev/null +++ b/pkg/controller/quota/resourcequota_controller.go @@ -0,0 +1,299 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package quota + +import ( + "context" + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/client-go/tools/record" + "k8s.io/klog" + evaluatorcore "kubesphere.io/kubesphere/kube/pkg/quota/v1/evaluator/core" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/install" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" + "math" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + "time" + + k8sinformers "k8s.io/client-go/informers" + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + + quotav1 "kubesphere.io/kubesphere/kube/pkg/quota/v1" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +const ( + ControllerName = "resourcequota-controller" + DefaultResyncPeriod = 5 * time.Minute + DefaultMaxConcurrentReconciles = 8 +) + +// Reconciler reconciles a Workspace object +type Reconciler struct { + client.Client + logger logr.Logger + recorder record.EventRecorder + maxConcurrentReconciles int + // Knows how to calculate usage + registry quotav1.Registry + // Controls full recalculation of quota usage + resyncPeriod time.Duration + scheme *runtime.Scheme +} + +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int, resyncPeriod time.Duration, informerFactory 
k8sinformers.SharedInformerFactory) error { + r.logger = ctrl.Log.WithName("controllers").WithName(ControllerName) + r.recorder = mgr.GetEventRecorderFor(ControllerName) + r.scheme = mgr.GetScheme() + r.registry = generic.NewRegistry(install.NewQuotaConfigurationForControllers(generic.ListerFuncForResourceFunc(informerFactory.ForResource)).Evaluators()) + if r.Client == nil { + r.Client = mgr.GetClient() + } + if maxConcurrentReconciles > 0 { + r.maxConcurrentReconciles = maxConcurrentReconciles + } else { + r.maxConcurrentReconciles = DefaultMaxConcurrentReconciles + } + r.resyncPeriod = time.Duration(math.Max(float64(resyncPeriod), float64(DefaultResyncPeriod))) + c, err := ctrl.NewControllerManagedBy(mgr). + Named(ControllerName). + WithOptions(controller.Options{ + MaxConcurrentReconciles: r.maxConcurrentReconciles, + }). + For(&quotav1alpha2.ResourceQuota{}). + WithEventFilter(predicate.GenerationChangedPredicate{ + Funcs: predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldQuota := e.ObjectOld.(*quotav1alpha2.ResourceQuota) + newQuota := e.ObjectNew.(*quotav1alpha2.ResourceQuota) + return !equality.Semantic.DeepEqual(oldQuota.Spec, newQuota.Spec) + }, + }, + }). + Build(r) + if err != nil { + return err + } + + resources := []runtime.Object{ + &corev1.Pod{}, + &corev1.Service{}, + &corev1.PersistentVolumeClaim{}, + } + realClock := clock.RealClock{} + for _, resource := range resources { + err := c.Watch( + &source.Kind{Type: resource}, + &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.mapper)}, + predicate.Funcs{ + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + notifyChange := false + // we only want to queue the updates we care about though as too much noise will overwhelm queue. 
+ switch e.MetaOld.(type) { + case *corev1.Pod: + oldPod := e.ObjectOld.(*corev1.Pod) + newPod := e.ObjectNew.(*corev1.Pod) + notifyChange = evaluatorcore.QuotaV1Pod(oldPod, realClock) && !evaluatorcore.QuotaV1Pod(newPod, realClock) + case *corev1.Service: + oldService := e.ObjectOld.(*corev1.Service) + newService := e.ObjectNew.(*corev1.Service) + notifyChange = evaluatorcore.GetQuotaServiceType(oldService) != evaluatorcore.GetQuotaServiceType(newService) + case *corev1.PersistentVolumeClaim: + notifyChange = true + } + return notifyChange + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + }) + if err != nil { + return err + } + } + return nil +} + +func (r *Reconciler) mapper(h handler.MapObject) []reconcile.Request { + // check if the quota controller can evaluate this kind, if not, ignore it altogether... + var result []reconcile.Request + evaluators := r.registry.List() + ctx := context.TODO() + resourceQuotaNames, err := resourceQuotaNamesFor(ctx, r.Client, h.Meta.GetNamespace()) + if err != nil { + klog.Errorf("failed to get resource quota names for: %v %T %v, err: %v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), err) + return result + } + // only queue those quotas that are tracking a resource associated with this kind. 
+ for _, resourceQuotaName := range resourceQuotaNames { + resourceQuota := &quotav1alpha2.ResourceQuota{} + if err := r.Get(ctx, types.NamespacedName{Name: resourceQuotaName}, resourceQuota); err != nil { + klog.Errorf("failed to get resource quota: %v, err: %v", resourceQuotaName, err) + return result + } + resourceQuotaResources := quotav1.ResourceNames(resourceQuota.Status.Total.Hard) + for _, evaluator := range evaluators { + matchedResources := evaluator.MatchingResources(resourceQuotaResources) + if len(matchedResources) > 0 { + result = append(result, reconcile.Request{NamespacedName: types.NamespacedName{Name: resourceQuotaName}}) + break + } + } + } + klog.V(6).Infof("resource quota reconcile after resource change: %v %T %v, %+v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), result) + return result +} + +func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + logger := r.logger.WithValues("resourcequota", req.NamespacedName) + rootCtx := context.TODO() + resourceQuota := &quotav1alpha2.ResourceQuota{} + if err := r.Get(rootCtx, req.NamespacedName, resourceQuota); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if err := r.bindWorkspace(resourceQuota); err != nil { + logger.Error(err, "failed to set owner reference") + return ctrl.Result{}, err + } + + if err := r.syncQuotaForNamespaces(resourceQuota); err != nil { + logger.Error(err, "failed to sync quota") + return ctrl.Result{}, err + } + + r.recorder.Event(resourceQuota, corev1.EventTypeNormal, "Synced", "Synced successfully") + return ctrl.Result{RequeueAfter: r.resyncPeriod}, nil +} + +func (r *Reconciler) bindWorkspace(resourceQuota *quotav1alpha2.ResourceQuota) error { + workspaceName := resourceQuota.Labels[constants.WorkspaceLabelKey] + if workspaceName == "" { + return nil + } + + workspace := &tenantv1alpha1.Workspace{} + err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace) + if err != nil { + return 
client.IgnoreNotFound(err) + } + + if !metav1.IsControlledBy(resourceQuota, workspace) { + resourceQuota.OwnerReferences = nil + if err := controllerutil.SetControllerReference(workspace, resourceQuota, r.scheme); err != nil { + return err + } + err = r.Update(context.TODO(), resourceQuota) + if err != nil { + klog.Error(err) + return err + } + } + + return nil +} + +func (r *Reconciler) syncQuotaForNamespaces(originalQuota *quotav1alpha2.ResourceQuota) error { + quota := originalQuota.DeepCopy() + ctx := context.TODO() + // get the list of namespaces that match this cluster quota + matchingNamespaceList := corev1.NamespaceList{} + if err := r.List(ctx, &matchingNamespaceList, &client.ListOptions{LabelSelector: labels.SelectorFromSet(quota.Spec.LabelSelector)}); err != nil { + return err + } + + matchingNamespaceNames := make([]string, 0) + for _, namespace := range matchingNamespaceList.Items { + matchingNamespaceNames = append(matchingNamespaceNames, namespace.Name) + } + + for _, namespace := range matchingNamespaceList.Items { + namespaceName := namespace.Name + namespaceTotals, _ := getResourceQuotasStatusByNamespace(quota.Status.Namespaces, namespaceName) + + actualUsage, err := quotaUsageCalculationFunc(namespaceName, quota.Spec.Quota.Scopes, quota.Spec.Quota.Hard, r.registry, quota.Spec.Quota.ScopeSelector) + if err != nil { + return err + } + recalculatedStatus := corev1.ResourceQuotaStatus{ + Used: actualUsage, + Hard: quota.Spec.Quota.Hard, + } + + // subtract old usage, add new usage + quota.Status.Total.Used = quotav1.Subtract(quota.Status.Total.Used, namespaceTotals.Used) + quota.Status.Total.Used = quotav1.Add(quota.Status.Total.Used, recalculatedStatus.Used) + insertResourceQuotasStatus(&quota.Status.Namespaces, quotav1alpha2.ResourceQuotaStatusByNamespace{ + Namespace: namespaceName, + ResourceQuotaStatus: recalculatedStatus, + }) + } + + // Remove any namespaces from quota.status that no longer match. 
+ statusCopy := quota.Status.Namespaces.DeepCopy() + for _, namespaceTotals := range statusCopy { + namespaceName := namespaceTotals.Namespace + if !sliceutil.HasString(matchingNamespaceNames, namespaceName) { + quota.Status.Total.Used = quotav1.Subtract(quota.Status.Total.Used, namespaceTotals.Used) + removeResourceQuotasStatusByNamespace(&quota.Status.Namespaces, namespaceName) + } + } + + quota.Status.Total.Hard = quota.Spec.Quota.Hard + + // if there's no change, no update, return early. NewAggregate returns nil on empty input + if equality.Semantic.DeepEqual(quota, originalQuota) { + return nil + } + + klog.V(6).Infof("update resource quota: %+v", quota) + if err := r.Status().Update(ctx, quota, &client.UpdateOptions{}); err != nil { + return err + } + + return nil +} + +// quotaUsageCalculationFunc is a function to calculate quota usage. It is only configurable for easy unit testing +// NEVER CHANGE THIS OUTSIDE A TEST +var quotaUsageCalculationFunc = quotav1.CalculateUsage diff --git a/pkg/controller/quota/resourcequota_webhook.go b/pkg/controller/quota/resourcequota_webhook.go new file mode 100644 index 000000000..5f047ebb3 --- /dev/null +++ b/pkg/controller/quota/resourcequota_webhook.go @@ -0,0 +1,191 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package quota + +import ( + "context" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilwait "k8s.io/apimachinery/pkg/util/wait" + admissionapi "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog" + "kubesphere.io/kubesphere/kube/pkg/quota/v1" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic" + "kubesphere.io/kubesphere/kube/pkg/quota/v1/install" + "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota" + resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota" + "net/http" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sort" + "sync" +) + +const ( + numEvaluatorThreads = 10 +) + +type ResourceQuotaAdmission struct { + client client.Client + + decoder *webhook.AdmissionDecoder + + lockFactory LockFactory + + // these are used to create the evaluator + registry quota.Registry + + init sync.Once + evaluator resourcequota.Evaluator +} + +func NewResourceQuotaAdmission(client client.Client, scheme *runtime.Scheme) (webhook.AdmissionHandler, error) { + decoder, err := admission.NewDecoder(scheme) + if err != nil { + return nil, err + } + return &ResourceQuotaAdmission{ + client: client, + lockFactory: NewDefaultLockFactory(), + decoder: decoder, + registry: generic.NewRegistry(install.NewQuotaConfigurationForAdmission().Evaluators()), + }, nil +} + +func (r *ResourceQuotaAdmission) Handle(ctx context.Context, req webhook.AdmissionRequest) webhook.AdmissionResponse { + // ignore all operations that correspond to sub-resource actions + if len(req.RequestSubResource) != 0 { + return webhook.Allowed("") + } + // ignore cluster level resources + if len(req.Namespace) == 0 { + return webhook.Allowed("") + } + + 
r.init.Do(func() { + resourceQuotaAccessor := newQuotaAccessor(r.client) + r.evaluator = resourcequota.NewQuotaEvaluator(resourceQuotaAccessor, install.DefaultIgnoredResources(), r.registry, r.lockAquisition, &resourcequotaapi.Configuration{}, numEvaluatorThreads, utilwait.NeverStop) + }) + + attributesRecord, err := convertToAdmissionAttributes(req) + if err != nil { + klog.Error(err) + return webhook.Errored(http.StatusBadRequest, err) + } + + if err := r.evaluator.Evaluate(attributesRecord); err != nil { + if errors.IsForbidden(err) { + klog.Info(err) + return webhook.Denied(err.Error()) + } + klog.Error(err) + return webhook.Errored(http.StatusInternalServerError, err) + } + + return webhook.Allowed("") +} + +type ByName []corev1.ResourceQuota + +func (v ByName) Len() int { return len(v) } +func (v ByName) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v ByName) Less(i, j int) bool { return v[i].Name < v[j].Name } + +func (r *ResourceQuotaAdmission) lockAquisition(quotas []corev1.ResourceQuota) func() { + var locks []sync.Locker + + // acquire the locks in alphabetical order because I'm too lazy to think of something clever + sort.Sort(ByName(quotas)) + for _, quota := range quotas { + lock := r.lockFactory.GetLock(string(quota.UID)) + lock.Lock() + locks = append(locks, lock) + } + + return func() { + for i := len(locks) - 1; i >= 0; i-- { + locks[i].Unlock() + } + } +} + +func convertToAdmissionAttributes(req admission.Request) (admissionapi.Attributes, error) { + var err error + var object runtime.Object + if len(req.Object.Raw) > 0 { + object, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.Object.Raw, nil, nil) + if err != nil { + return nil, err + } + } + + var oldObject runtime.Object + if len(req.OldObject.Raw) > 0 { + oldObject, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.OldObject.Raw, nil, nil) + if err != nil { + klog.Error(err) + return nil, err + } + } + + var operationOptions runtime.Object + if len(req.Options.Raw) > 0 
{ + operationOptions, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.Options.Raw, nil, nil) + if err != nil { + klog.Error(err) + return nil, err + } + } + + extras := map[string][]string{} + for k, v := range req.UserInfo.Extra { + extras[k] = v + } + + attributesRecord := admissionapi.NewAttributesRecord(object, + oldObject, + schema.GroupVersionKind{ + Group: req.RequestKind.Group, + Version: req.RequestKind.Version, + Kind: req.RequestKind.Kind, + }, + req.Namespace, + req.Name, + schema.GroupVersionResource{ + Group: req.RequestResource.Group, + Version: req.RequestResource.Version, + Resource: req.RequestResource.Resource, + }, + req.SubResource, + admissionapi.Operation(req.Operation), + operationOptions, + *req.DryRun, + &user.DefaultInfo{ + Name: req.UserInfo.Username, + UID: req.UserInfo.UID, + Groups: req.UserInfo.Groups, + Extra: extras, + }) + return attributesRecord, nil +} diff --git a/pkg/controller/quota/util.go b/pkg/controller/quota/util.go new file mode 100644 index 000000000..0a8557503 --- /dev/null +++ b/pkg/controller/quota/util.go @@ -0,0 +1,92 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package quota + +import ( + "context" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Following code copied from github.com/openshift/library-go/pkg/quota/quotautil +func getResourceQuotasStatusByNamespace(namespaceStatuses quotav1alpha2.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.ResourceQuotaStatus, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func removeResourceQuotasStatusByNamespace(namespaceStatuses *quotav1alpha2.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1alpha2.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func insertResourceQuotasStatus(namespaceStatuses *quotav1alpha2.ResourceQuotasStatusByNamespace, newStatus quotav1alpha2.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1alpha2.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} + +func resourceQuotaNamesFor(ctx context.Context, client client.Client, namespaceName string) ([]string, error) { + namespace := 
&corev1.Namespace{} + var resourceQuotaNames []string + if err := client.Get(ctx, types.NamespacedName{Name: namespaceName}, namespace); err != nil { + return resourceQuotaNames, err + } + if len(namespace.Labels) == 0 { + return resourceQuotaNames, nil + } + resourceQuotaList := &quotav1alpha2.ResourceQuotaList{} + if err := client.List(ctx, resourceQuotaList); err != nil { + return resourceQuotaNames, err + } + for _, resourceQuota := range resourceQuotaList.Items { + if len(resourceQuota.Spec.LabelSelector) > 0 && + labels.SelectorFromSet(resourceQuota.Spec.LabelSelector).Matches(labels.Set(namespace.Labels)) { + resourceQuotaNames = append(resourceQuotaNames, resourceQuota.Name) + } + } + return resourceQuotaNames, nil +} diff --git a/pkg/kapis/tenant/v1alpha2/handler.go b/pkg/kapis/tenant/v1alpha2/handler.go index 65bddea45..617b4678c 100644 --- a/pkg/kapis/tenant/v1alpha2/handler.go +++ b/pkg/kapis/tenant/v1alpha2/handler.go @@ -29,6 +29,7 @@ import ( auditingv1alpha1 "kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1" eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1" loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" "kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer" "kubesphere.io/kubesphere/pkg/apiserver/query" @@ -546,3 +547,78 @@ func (h *tenantHandler) ListClusters(r *restful.Request, response *restful.Respo response.WriteEntity(result) } + +func (h *tenantHandler) CreateWorkspaceResourceQuota(r *restful.Request, response *restful.Response) { + workspaceName := r.PathParameter("workspace") + resourceQuota := &quotav1alpha2.ResourceQuota{} + err := r.ReadEntity(resourceQuota) + if err != nil { + api.HandleBadRequest(response, r, err) + return + } + result, err := h.tenant.CreateWorkspaceResourceQuota(workspaceName, resourceQuota) + if err != nil { + api.HandleInternalError(response, 
r, err) + return + } + response.WriteEntity(result) +} + +func (h *tenantHandler) DeleteWorkspaceResourceQuota(r *restful.Request, response *restful.Response) { + workspace := r.PathParameter("workspace") + resourceQuota := r.PathParameter("resourcequota") + + if err := h.tenant.DeleteWorkspaceResourceQuota(workspace, resourceQuota); err != nil { + if errors.IsNotFound(err) { + api.HandleNotFound(response, r, err) + return + } + api.HandleInternalError(response, r, err) + return + } + + response.WriteEntity(servererr.None) +} + +func (h *tenantHandler) UpdateWorkspaceResourceQuota(r *restful.Request, response *restful.Response) { + workspaceName := r.PathParameter("workspace") + resourceQuotaName := r.PathParameter("resourcequota") + resourceQuota := &quotav1alpha2.ResourceQuota{} + err := r.ReadEntity(resourceQuota) + if err != nil { + api.HandleBadRequest(response, r, err) + return + } + + if resourceQuotaName != resourceQuota.Name { + err := fmt.Errorf("the name of the object (%s) does not match the name on the URL (%s)", resourceQuota.Name, resourceQuotaName) + klog.Errorf("%+v", err) + api.HandleBadRequest(response, r, err) + return + } + + result, err := h.tenant.UpdateWorkspaceResourceQuota(workspaceName, resourceQuota) + if err != nil { + api.HandleInternalError(response, r, err) + return + } + + response.WriteEntity(result) +} + +func (h *tenantHandler) DescribeWorkspaceResourceQuota(r *restful.Request, response *restful.Response) { + workspaceName := r.PathParameter("workspace") + resourceQuotaName := r.PathParameter("resourcequota") + + resourceQuota, err := h.tenant.DescribeWorkspaceResourceQuota(workspaceName, resourceQuotaName) + if err != nil { + if errors.IsNotFound(err) { + api.HandleNotFound(response, r, err) + return + } + api.HandleInternalError(response, r, err) + return + } + + response.WriteEntity(resourceQuota) +} diff --git a/pkg/kapis/tenant/v1alpha2/register.go b/pkg/kapis/tenant/v1alpha2/register.go index cf295e5e4..868b6244b 100644 --- 
a/pkg/kapis/tenant/v1alpha2/register.go +++ b/pkg/kapis/tenant/v1alpha2/register.go @@ -26,6 +26,7 @@ import ( auditingv1alpha1 "kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1" eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1" loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" "kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer" "kubesphere.io/kubesphere/pkg/apiserver/runtime" @@ -288,6 +289,38 @@ func AddToContainer(c *restful.Container, factory informers.InformerFactory, k8s Writes(auditingv1alpha1.APIResponse{}). Returns(http.StatusOK, api.StatusOK, auditingv1alpha1.APIResponse{})) + ws.Route(ws.POST("/workspaces/{workspace}/resourcequotas"). + To(handler.CreateWorkspaceResourceQuota). + Reads(quotav1alpha2.ResourceQuota{}). + Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}). + Doc("Create resource quota."). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag})) + + ws.Route(ws.DELETE("/workspaces/{workspace}/resourcequotas/{resourcequota}"). + To(handler.DeleteWorkspaceResourceQuota). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("resourcequota", "resource quota name")). + Returns(http.StatusOK, api.StatusOK, errors.None). + Doc("Delete resource quota."). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag})) + + ws.Route(ws.PUT("/workspaces/{workspace}/resourcequotas/{resourcequota}"). + To(handler.UpdateWorkspaceResourceQuota). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("resourcequota", "resource quota name")). + Reads(quotav1alpha2.ResourceQuota{}). + Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}). + Doc("Update resource quota."). 
+ Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag})) + + ws.Route(ws.GET("/workspaces/{workspace}/resourcequotas/{resourcequota}"). + To(handler.DescribeWorkspaceResourceQuota). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("resourcequota", "resource quota name")). + Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}). + Doc("Describe resource quota."). + Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag})) + c.Add(ws) return nil } diff --git a/pkg/models/tenant/resourcequota.go b/pkg/models/tenant/resourcequota.go new file mode 100644 index 000000000..22272ed95 --- /dev/null +++ b/pkg/models/tenant/resourcequota.go @@ -0,0 +1,75 @@ +/* + + Copyright 2021 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package tenant + +import ( + "context" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" +) + +func (t *tenantOperator) CreateWorkspaceResourceQuota(workspace string, quota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) { + if quota.Labels == nil { + quota.Labels = make(map[string]string) + } + quota.Labels[tenantv1alpha1.WorkspaceLabel] = workspace + quota.Spec.LabelSelector = labels.Set{tenantv1alpha1.WorkspaceLabel: workspace} + return t.ksclient.QuotaV1alpha2().ResourceQuotas().Create(context.TODO(), quota, metav1.CreateOptions{}) +} + +func (t *tenantOperator) UpdateWorkspaceResourceQuota(workspace string, quota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) { + resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), quota.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace { + return nil, errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuota.Name) + } + if quota.Labels == nil { + quota.Labels = make(map[string]string) + } + quota.Labels[tenantv1alpha1.WorkspaceLabel] = workspace + quota.Spec.LabelSelector = labels.Set{tenantv1alpha1.WorkspaceLabel: workspace} + return t.ksclient.QuotaV1alpha2().ResourceQuotas().Update(context.TODO(), quota, metav1.UpdateOptions{}) +} + +func (t *tenantOperator) DeleteWorkspaceResourceQuota(workspace string, resourceQuotaName string) error { + resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), resourceQuotaName, metav1.GetOptions{}) + if err != nil { + return err + } + if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace { + return 
errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuotaName) + } + return t.ksclient.QuotaV1alpha2().ResourceQuotas().Delete(context.TODO(), resourceQuotaName, metav1.DeleteOptions{}) +} + +func (t *tenantOperator) DescribeWorkspaceResourceQuota(workspace string, resourceQuotaName string) (*quotav1alpha2.ResourceQuota, error) { + resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), resourceQuotaName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace { + return nil, errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuotaName) + } + return resourceQuota, nil +} diff --git a/pkg/models/tenant/tenant.go b/pkg/models/tenant/tenant.go index 86e1ffd76..8d0cd8a47 100644 --- a/pkg/models/tenant/tenant.go +++ b/pkg/models/tenant/tenant.go @@ -38,6 +38,7 @@ import ( eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1" loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2" clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" + quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1" @@ -79,6 +80,10 @@ type Interface interface { PatchNamespace(workspace string, namespace *corev1.Namespace) (*corev1.Namespace, error) PatchWorkspace(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) ListClusters(info user.Info) (*api.ListResult, error) + CreateWorkspaceResourceQuota(workspace string, resourceQuota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) + DeleteWorkspaceResourceQuota(workspace string, resourceQuotaName string) error + UpdateWorkspaceResourceQuota(workspace string, resourceQuota 
*quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) + DescribeWorkspaceResourceQuota(workspace string, resourceQuotaName string) (*quotav1alpha2.ResourceQuota, error) } type tenantOperator struct { diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go new file mode 100644 index 000000000..e4ebd61f8 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatainformer + +import ( + "context" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + "k8s.io/client-go/metadata" + "k8s.io/client-go/metadata/metadatalister" + "k8s.io/client-go/tools/cache" +) + +// NewSharedInformerFactory constructs a new instance of metadataSharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client metadata.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of metadataSharedInformerFactory. +// Listers obtained via this factory will be subject to the same filters as specified here. 
+func NewFilteredSharedInformerFactory(client metadata.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) SharedInformerFactory { + return &metadataSharedInformerFactory{ + client: client, + defaultResync: defaultResync, + namespace: namespace, + informers: map[schema.GroupVersionResource]informers.GenericInformer{}, + startedInformers: make(map[schema.GroupVersionResource]bool), + tweakListOptions: tweakListOptions, + } +} + +type metadataSharedInformerFactory struct { + client metadata.Interface + defaultResync time.Duration + namespace string + + lock sync.Mutex + informers map[schema.GroupVersionResource]informers.GenericInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[schema.GroupVersionResource]bool + tweakListOptions TweakListOptionsFunc +} + +var _ SharedInformerFactory = &metadataSharedInformerFactory{} + +func (f *metadataSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer { + f.lock.Lock() + defer f.lock.Unlock() + + key := gvr + informer, exists := f.informers[key] + if exists { + return informer + } + + informer = NewFilteredMetadataInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) + f.informers[key] = informer + + return informer +} + +// Start initializes all requested informers. +func (f *metadataSharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Informer().Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *metadataSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool { + informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer.Informer() + } + } + return informers + }() + + res := map[schema.GroupVersionResource]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// NewFilteredMetadataInformer constructs a new informer for a metadata type. +func NewFilteredMetadataInformer(client metadata.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { + return &metadataInformer{ + gvr: gvr, + informer: cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options) + }, + }, + &metav1.PartialObjectMetadata{}, + resyncPeriod, + indexers, + ), + } +} + +type metadataInformer struct { + informer cache.SharedIndexInformer + gvr schema.GroupVersionResource +} + +var _ informers.GenericInformer = &metadataInformer{} + +func (d *metadataInformer) Informer() cache.SharedIndexInformer { + return d.informer +} + +func (d *metadataInformer) Lister() cache.GenericLister { + return 
metadatalister.NewRuntimeObjectShim(metadatalister.New(d.informer.GetIndexer(), d.gvr)) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go new file mode 100644 index 000000000..732e565c7 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatainformer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" +) + +// SharedInformerFactory provides access to a shared informer and lister for dynamic client +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + ForResource(gvr schema.GroupVersionResource) informers.GenericInformer + WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool +} + +// TweakListOptionsFunc defines the signature of a helper function +// that wants to provide more listing options to API +type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/interface.go b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go new file mode 100644 index 000000000..bb3548589 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*metav1.PartialObjectMetadata, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*metav1.PartialObjectMetadata, error) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/lister.go b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go new file mode 100644 index 000000000..faeccc0fc --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &metadataLister{} +var _ NamespaceLister = &metadataNamespaceLister{} + +// metadataLister implements the Lister interface. +type metadataLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &metadataLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. +func (l *metadataLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *metadataLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. 
+func (l *metadataLister) Namespace(namespace string) NamespaceLister { + return &metadataNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// metadataNamespaceLister implements the NamespaceLister interface. +type metadataNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *metadataNamespaceLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. +func (l *metadataNamespaceLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/shim.go b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go new file mode 100644 index 000000000..f31c60725 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var _ cache.GenericLister = &metadataListerShim{} +var _ cache.GenericNamespaceLister = &metadataNamespaceListerShim{} + +// metadataListerShim implements the cache.GenericLister interface. +type metadataListerShim struct { + lister Lister +} + +// NewRuntimeObjectShim returns a new shim for Lister. +// It wraps Lister so that it implements cache.GenericLister interface +func NewRuntimeObjectShim(lister Lister) cache.GenericLister { + return &metadataListerShim{lister: lister} +} + +// List will return all objects across namespaces +func (s *metadataListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := s.lister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve assuming that name==key +func (s *metadataListerShim) Get(name string) (runtime.Object, error) { + return s.lister.Get(name) +} + +func (s *metadataListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &metadataNamespaceListerShim{ + namespaceLister: s.lister.Namespace(namespace), + } +} + +// metadataNamespaceListerShim implements the NamespaceLister interface. 
+// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface +type metadataNamespaceListerShim struct { + namespaceLister NamespaceLister +} + +// List will return all objects in this namespace +func (ns *metadataNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := ns.namespaceLister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve by namespace and name +func (ns *metadataNamespaceListerShim) Get(name string) (runtime.Object, error) { + return ns.namespaceLister.Get(name) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index af18ba87e..7e42a3498 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1424,6 +1424,8 @@ k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/metadata +k8s.io/client-go/metadata/metadatainformer +k8s.io/client-go/metadata/metadatalister k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1