Merge branch 'master' into metering
Signed-off-by: Rao Yunkun <yunkunrao@yunify.com>
.github/workflows/e2e-test.yml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
name: e2e

on:
  schedule:
    # run e2e test every 4 hours
    - cron: 0 */4 * * *
  workflow_dispatch:
jobs:
  build:
    name: Test
    runs-on: ubuntu-latest
    env:
      GO111MODULE: on
    steps:

      - name: Set up Go 1.13
        uses: actions/setup-go@v1
        with:
          go-version: 1.13
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Downloading go dependencies
        run: go mod vendor

      - name: Create kind cluster
        uses: helm/kind-action@v1.0.0-rc.1
        with:
          config: .github/workflows/kind/kind.yaml

      - name: Deploy KubeSphere to Kind
        run: KIND_CLUSTER_NAME=chart-testing hack/deploy-kubesphere.sh

      - name: Run e2e testing
        run: go test ./test/e2e
.github/workflows/kind/kind.yaml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: kindest/node:v1.19.7
  extraMounts:
  - hostPath: /etc/localtime
    containerPath: /etc/localtime
  extraPortMappings:
  - containerPort: 30881
    hostPort: 9090
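The kind config above maps container port 30881 — the NodePort that the deploy script later in this diff patches onto the ks-apiserver service — to port 9090 on the CI runner, so the e2e suite can reach the API server on localhost. A minimal sketch of what a test under ./test/e2e could look like (the real suite's helpers and endpoints are not shown in this diff; the test name and URL path are assumptions):

```go
package e2e

import (
	"net/http"
	"testing"
)

// TestAPIServerReachable is a hypothetical smoke test: the kind node maps
// containerPort 30881 (the ks-apiserver NodePort patched in
// hack/deploy-kubesphere.sh) to hostPort 9090 on the runner.
func TestAPIServerReachable(t *testing.T) {
	resp, err := http.Get("http://127.0.0.1:9090/")
	if err != nil {
		t.Fatalf("ks-apiserver not reachable: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 500 {
		t.Fatalf("unexpected status: %s", resp.Status)
	}
}
```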
.gitignore (vendored, 1 change)
@@ -19,6 +19,7 @@ bin/

 # Vscode files
 .vscode/
+__debug_bin

 # OSX trash
 .DS_Store
@@ -253,13 +253,9 @@ func addControllers(
 	}

 	var ippoolController manager.Runnable
-	ippoolProvider := ippoolclient.NewProvider(kubernetesInformer.Core().V1().Pods(), client.KubeSphere(), client.Kubernetes(), networkOptions.IPPoolType, options)
+	ippoolProvider := ippoolclient.NewProvider(kubernetesInformer, client.KubeSphere(), client.Kubernetes(), networkOptions.IPPoolType, options)
 	if ippoolProvider != nil {
-		ippoolController = ippool.NewIPPoolController(kubesphereInformer.Network().V1alpha1().IPPools(),
-			kubesphereInformer.Network().V1alpha1().IPAMBlocks(),
-			client.Kubernetes(),
-			client.KubeSphere(),
-			ippoolProvider)
+		ippoolController = ippool.NewIPPoolController(kubesphereInformer, kubernetesInformer, client.Kubernetes(), client.KubeSphere(), ippoolProvider)
 	}

 	controllers := map[string]manager.Runnable{
@@ -18,6 +18,7 @@ package options

 import (
 	"flag"
+	"k8s.io/apimachinery/pkg/labels"
 	"strings"
 	"time"

@@ -49,6 +50,15 @@ type KubeSphereControllerManagerOptions struct {
 	LeaderElect    bool
 	LeaderElection *leaderelection.LeaderElectionConfig
 	WebhookCertDir string
+
+	// KubeSphere uses sigs.k8s.io/application as the fundamental object to implement Application Management.
+	// There are other projects also built on sigs.k8s.io/application; when KubeSphere is installed alongside
+	// them, conflicts happen. So we leave an option to only reconcile applications matching the given
+	// selector. The default is to reconcile all applications.
+	// For example:
+	// "kubesphere.io/creator=" means reconcile applications with this label key
+	// "!kubesphere.io/creator" means exclude applications with this key
+	ApplicationSelector string
 }

 func NewKubeSphereControllerManagerOptions() *KubeSphereControllerManagerOptions {
@@ -67,8 +77,9 @@ func NewKubeSphereControllerManagerOptions() *KubeSphereControllerManagerOptions
 			RenewDeadline: 15 * time.Second,
 			RetryPeriod:   5 * time.Second,
 		},
-		LeaderElect:    false,
-		WebhookCertDir: "",
+		LeaderElect:         false,
+		WebhookCertDir:      "",
+		ApplicationSelector: "",
 	}

 	return s
@@ -99,6 +110,11 @@ func (s *KubeSphereControllerManagerOptions) Flags() cliflag.NamedFlagSets {
 		"if not set, webhook server would look up the server key and certificate in"+
 		"{TempDir}/k8s-webhook-server/serving-certs")

+	gfs := fss.FlagSet("generic")
+	gfs.StringVar(&s.ApplicationSelector, "application-selector", s.ApplicationSelector, ""+
+		"Only reconcile application (sigs.k8s.io/application) objects matching the given selector; this avoids conflicts with "+
+		"other projects built on top of sig-application. The default behavior is to reconcile all application objects.")
+
 	kfs := fss.FlagSet("klog")
 	local := flag.NewFlagSet("klog", flag.ExitOnError)
 	klog.InitFlags(local)
@@ -118,6 +134,14 @@ func (s *KubeSphereControllerManagerOptions) Validate() []error {
 	errs = append(errs, s.OpenPitrixOptions.Validate()...)
 	errs = append(errs, s.NetworkOptions.Validate()...)
 	errs = append(errs, s.LdapOptions.Validate()...)
+
+	if len(s.ApplicationSelector) != 0 {
+		_, err := labels.Parse(s.ApplicationSelector)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
 	return errs
 }
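For reference, the --application-selector value added above is validated with labels.Parse, i.e. the standard Kubernetes label-selector grammar. A standalone sketch of how the two selector forms from the comment behave (hypothetical demo code, not part of this commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "kubesphere.io/creator=" matches objects whose value for the key is
	// the empty string; "!kubesphere.io/creator" matches objects that do
	// not carry the key at all.
	selector, err := labels.Parse("kubesphere.io/creator=")
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.Matches(labels.Set{"kubesphere.io/creator": ""})) // true
	fmt.Println(selector.Matches(labels.Set{"app": "demo"}))               // false

	exclude, _ := labels.Parse("!kubesphere.io/creator")
	fmt.Println(exclude.Matches(labels.Set{"app": "demo"})) // true
}
```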
@@ -18,6 +18,9 @@ package app

 import (
 	"fmt"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"kubesphere.io/kubesphere/pkg/controller/application"
 	"os"

 	"github.com/spf13/cobra"
@@ -28,9 +31,9 @@ import (
 	"kubesphere.io/kubesphere/cmd/controller-manager/app/options"
 	"kubesphere.io/kubesphere/pkg/apis"
 	controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config"
-	appcontroller "kubesphere.io/kubesphere/pkg/controller/application"
 	"kubesphere.io/kubesphere/pkg/controller/namespace"
 	"kubesphere.io/kubesphere/pkg/controller/network/webhooks"
+	"kubesphere.io/kubesphere/pkg/controller/quota"
 	"kubesphere.io/kubesphere/pkg/controller/serviceaccount"
 	"kubesphere.io/kubesphere/pkg/controller/user"
 	"kubesphere.io/kubesphere/pkg/controller/workspace"
@@ -46,7 +49,6 @@ import (
 	"kubesphere.io/kubesphere/pkg/simple/client/s3"
 	"kubesphere.io/kubesphere/pkg/utils/metrics"
 	"kubesphere.io/kubesphere/pkg/utils/term"
-	application "sigs.k8s.io/application/controllers"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
@@ -194,50 +196,53 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
 		klog.Fatalf("unable add APIs to scheme: %v", err)
 	}

+	// register common meta types into schemas.
+	metav1.AddToGroupVersion(mgr.GetScheme(), metav1.SchemeGroupVersion)
+
 	workspaceTemplateReconciler := &workspacetemplate.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable}
 	if err = workspaceTemplateReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create workspace template controller")
+		klog.Fatalf("Unable to create workspace template controller: %v", err)
 	}

 	workspaceReconciler := &workspace.Reconciler{}
 	if err = workspaceReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create workspace controller")
+		klog.Fatalf("Unable to create workspace controller: %v", err)
 	}

 	workspaceRoleReconciler := &workspacerole.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable}
 	if err = workspaceRoleReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create workspace role controller")
+		klog.Fatalf("Unable to create workspace role controller: %v", err)
 	}

 	workspaceRoleBindingReconciler := &workspacerolebinding.Reconciler{MultiClusterEnabled: s.MultiClusterOptions.Enable}
 	if err = workspaceRoleBindingReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create workspace role binding controller")
+		klog.Fatalf("Unable to create workspace role binding controller: %v", err)
 	}

 	namespaceReconciler := &namespace.Reconciler{}
 	if err = namespaceReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create namespace controller")
-	}
-
-	err = appcontroller.Add(mgr)
-	if err != nil {
-		klog.Fatal("Unable to create ks application controller")
+		klog.Fatalf("Unable to create namespace controller: %v", err)
 	}

+	selector, _ := labels.Parse(s.ApplicationSelector)
 	applicationReconciler := &application.ApplicationReconciler{
-		Scheme: mgr.GetScheme(),
-		Client: mgr.GetClient(),
-		Mapper: mgr.GetRESTMapper(),
-		Log:    klogr.New(),
+		Scheme:              mgr.GetScheme(),
+		Client:              mgr.GetClient(),
+		Mapper:              mgr.GetRESTMapper(),
+		ApplicationSelector: selector,
 	}
 	if err = applicationReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create application controller")
+		klog.Fatalf("Unable to create application controller: %v", err)
 	}

 	saReconciler := &serviceaccount.Reconciler{}

 	if err = saReconciler.SetupWithManager(mgr); err != nil {
-		klog.Fatal("Unable to create ServiceAccount controller")
+		klog.Fatalf("Unable to create ServiceAccount controller: %v", err)
 	}

+	resourceQuotaReconciler := quota.Reconciler{}
+	if err := resourceQuotaReconciler.SetupWithManager(mgr, quota.DefaultMaxConcurrentReconciles, quota.DefaultResyncPeriod, informerFactory.KubernetesSharedInformerFactory()); err != nil {
+		klog.Fatalf("Unable to create ResourceQuota controller: %v", err)
+	}
+
 	// TODO(jeff): refactor config with CRD
@@ -267,10 +272,16 @@ func run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{})
 	hookServer := mgr.GetWebhookServer()

 	klog.V(2).Info("registering webhooks to the webhook server")
-	hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}})
+	hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}})
 	hookServer.Register("/validate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.ValidatingHandler{C: mgr.GetClient()}})
 	hookServer.Register("/mutate-network-kubesphere-io-v1alpha1", &webhook.Admission{Handler: &webhooks.MutatingHandler{C: mgr.GetClient()}})

+	resourceQuotaAdmission, err := quota.NewResourceQuotaAdmission(mgr.GetClient(), mgr.GetScheme())
+	if err != nil {
+		klog.Fatalf("unable to create resource quota admission: %v", err)
+	}
+	hookServer.Register("/validate-quota-kubesphere-io-v1alpha2", &webhook.Admission{Handler: resourceQuotaAdmission})
+
 	klog.V(2).Info("registering metrics to the webhook server")
 	hookServer.Register("/metrics", metrics.Handler())
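The new /validate-quota-kubesphere-io-v1alpha2 endpoint registered above is served by quota.NewResourceQuotaAdmission and plugs into controller-runtime's admission machinery. A minimal sketch of the shape such a handler takes (demoValidator is hypothetical, not the actual quota implementation):

```go
package demo

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// demoValidator stands in for the resource quota admission handler.
type demoValidator struct{}

// Handle receives each AdmissionReview routed to the registered path;
// a real quota handler would recompute usage and deny on overflow.
func (v *demoValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
	return admission.Allowed("")
}

func register(hookServer *webhook.Server) {
	hookServer.Register("/validate-quota-kubesphere-io-v1alpha2",
		&webhook.Admission{Handler: &demoValidator{}})
}
```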
config/crds/quota.kubesphere.io_resourcequotas.yaml (generated, new file, 170 lines)
@@ -0,0 +1,170 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: (devel)
  creationTimestamp: null
  name: resourcequotas.quota.kubesphere.io
spec:
  group: quota.kubesphere.io
  names:
    categories:
    - quota
    kind: ResourceQuota
    listKind: ResourceQuotaList
    plural: resourcequotas
    singular: resourcequota
  scope: Cluster
  subresources:
    status: {}
  validation:
    openAPIV3Schema:
      description: WorkspaceResourceQuota sets aggregate quota restrictions enforced per workspace
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
          type: string
        metadata:
          description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
          type: object
        spec:
          description: Spec defines the desired quota
          properties:
            quota:
              description: Quota defines the desired quota
              properties:
                hard:
                  additionalProperties:
                    type: string
                  description: 'hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/'
                  type: object
                scopeSelector:
                  description: scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
                  properties:
                    matchExpressions:
                      description: A list of scope selector requirements by scope of the resources.
                      items:
                        description: A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.
                        properties:
                          operator:
                            description: Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.
                            type: string
                          scopeName:
                            description: The name of the scope that the selector applies to.
                            type: string
                          values:
                            description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                            items:
                              type: string
                            type: array
                        required:
                        - operator
                        - scopeName
                        type: object
                      type: array
                  type: object
                scopes:
                  description: A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
                  items:
                    description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota
                    type: string
                  type: array
              type: object
            selector:
              additionalProperties:
                type: string
              description: LabelSelector is used to select projects by label.
              type: object
          required:
          - quota
          - selector
          type: object
        status:
          description: Status defines the actual enforced quota and its current usage
          properties:
            namespaces:
              description: Namespaces slices the usage by project.
              items:
                description: ResourceQuotaStatusByNamespace gives status for a particular project
                properties:
                  hard:
                    additionalProperties:
                      type: string
                    description: 'Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/'
                    type: object
                  namespace:
                    description: Namespace the project this status applies to
                    type: string
                  used:
                    additionalProperties:
                      type: string
                    description: Used is the current observed total usage of the resource in the namespace.
                    type: object
                required:
                - namespace
                type: object
              type: array
            total:
              description: Total defines the actual enforced quota and its current usage across all projects
              properties:
                hard:
                  additionalProperties:
                    type: string
                  description: 'Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/'
                  type: object
                used:
                  additionalProperties:
                    type: string
                  description: Used is the current observed total usage of the resource in the namespace.
                  type: object
              type: object
          required:
          - namespaces
          - total
          type: object
      required:
      - spec
      type: object
  version: v1alpha2
  versions:
  - name: v1alpha2
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
@@ -8,9 +8,9 @@ webhooks:
   clientConfig:
     caBundle: <caBundle>
     service:
-      name: webhook-service
+      name: ks-controller-manager
       namespace: kubesphere-system
-      path: /validate-email-iam-kubesphere-io-v1alpha2-user
+      path: /validate-email-iam-kubesphere-io-v1alpha2
   failurePolicy: Fail
   name: vemail.iam.kubesphere.io
   rules:
@@ -22,19 +22,4 @@ webhooks:
     - CREATE
     - UPDATE
     resources:
     - users
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: webhook-service
-  namespace: kubesphere-system
-spec:
-  ports:
-  - port: 443
-    targetPort: 443
-  selector:
-    app: ks-controller-manager
-    tier: backend
config/webhook/ks-controller-manager.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: ks-controller-manager
  namespace: kubesphere-system
spec:
  ports:
  - port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    app: ks-controller-manager
    tier: backend
@@ -7,9 +7,9 @@ webhooks:
 - clientConfig:
     caBundle: <caBundle>
     service:
-      name: kubesphere-controller-manager-service
+      name: ks-controller-manager
       namespace: kubesphere-system
-      path: /validate-nsnp-kubesphere-io-v1alpha1-network
+      path: /validate-network-kubesphere-io-v1alpha1
   failurePolicy: Fail
   name: validate.nsnp.kubesphere.io
   rules:
config/webhook/quota.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: resourcesquotas.quota.kubesphere.io
webhooks:
- admissionReviewVersions:
  - v1beta1
  clientConfig:
    caBundle: <caBundle>
    service:
      name: ks-controller-manager
      namespace: kubesphere-system
      path: /validate-quota-kubesphere-io-v1alpha2
      port: 443
  failurePolicy: Ignore
  matchPolicy: Exact
  name: resourcesquotas.quota.kubesphere.io
  namespaceSelector: {}
  objectSelector: {}
  rules:
  - apiGroups:
    - '*'
    apiVersions:
    - '*'
    operations:
    - CREATE
    resources:
    - pods
    scope: '*'
  sideEffects: None
@@ -55,7 +55,7 @@ KubeSphere Roadmap demonstrates a list of open source product development plans

 ### KubeEdge Integration [#3070](https://github.com/kubesphere/kubesphere/issues/3070)

-- [ ] KubeEdge cloud components setup.
+- [x] KubeEdge cloud components setup.
 - [ ] KubeEdge edge nodes setup.
 - [x] Edge nodes logging and metrics support.
 - [x] Automatic network configuration on edge node joining/leaving.
@@ -71,14 +71,14 @@ KubeSphere Roadmap demonstrates a list of open source product development plans

 - [x] Configure ServiceMonitor via UI. [#1301](https://github.com/kubesphere/console/pull/1301)
 - [x] PromQL auto-completion and syntax highlighting. [#1307](https://github.com/kubesphere/console/pull/1307)
-- [ ] Support cluster-level custom monitoring. [#3193](https://github.com/kubesphere/kubesphere/pull/3193)
-- [ ] Import dashboards from Grafana templates.
+- [x] Support cluster-level custom monitoring. [#3193](https://github.com/kubesphere/kubesphere/pull/3193)
+- [x] Tools to convert Grafana dashboards to KubeSphere dashboards. [#9](https://github.com/kubesphere/monitoring-dashboard/pull/9)

 #### Custom Alerting [#3065](https://github.com/kubesphere/kubesphere/issues/3065)

-- [ ] Prometheus alert rule management. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)
-- [ ] Alert rule tenant control: global/namespace level alert rules. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)
-- [ ] List alerts for a specific alert rule. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)
+- [x] Prometheus alert rule management. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)
+- [x] Alert rule tenant control: global/namespace level alert rules. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)
+- [x] List alerts for a specific alert rule. [#3181](https://github.com/kubesphere/kubesphere/pull/3181)

 #### Multi-tenant Notification support including Email/DingTalk/Slack/WeChat Work/Webhook [#3066](https://github.com/kubesphere/kubesphere/issues/3066)
@@ -94,20 +94,25 @@ KubeSphere Roadmap demonstrates a list of open source product development plans

 ### Application Lifecycle Management (OpenPitrix)

 - [ ] Refactoring OpenPitrix with CRDs, while fixing bugs caused by the legacy architecture [#3036](https://github.com/kubesphere/kubesphere/issues/3036) [#3001](https://github.com/kubesphere/kubesphere/issues/3001) [#2995](https://github.com/kubesphere/kubesphere/issues/2995) [#2981](https://github.com/kubesphere/kubesphere/issues/2981) [#2954](https://github.com/kubesphere/kubesphere/issues/2954) [#2951](https://github.com/kubesphere/kubesphere/issues/2951) [#2783](https://github.com/kubesphere/kubesphere/issues/2783) [#2713](https://github.com/kubesphere/kubesphere/issues/2713) [#2700](https://github.com/kubesphere/kubesphere/issues/2700) [#1903](https://github.com/kubesphere/kubesphere/issues/1903)
+- [ ] Support global repo [#1598](https://github.com/kubesphere/kubesphere/issues/1598)

 ### Network

-- [ ] IPPool for Calico and VMs [#3057](https://github.com/kubesphere/kubesphere/issues/3057)
-- [ ] Support for deployment using static IPs [#3058](https://github.com/kubesphere/kubesphere/issues/3058)
-- [ ] Support for ks-installer with porter as a system component [#3059](https://github.com/kubesphere/kubesphere/issues/3059)
-- [ ] Support for defining porter-related configuration items in the UI [#3060](https://github.com/kubesphere/kubesphere/issues/3060)
-- [ ] Support network visualization [#3061](https://github.com/kubesphere/kubesphere/issues/3061) [#583](https://github.com/kubesphere/kubesphere/issues/583)
+- [x] IPPool for Calico and VMs [#3057](https://github.com/kubesphere/kubesphere/issues/3057)
+- [x] Support for deployment using static IPs [#3058](https://github.com/kubesphere/kubesphere/issues/3058)
+- [x] Support network visualization [#3061](https://github.com/kubesphere/kubesphere/issues/3061) [#583](https://github.com/kubesphere/kubesphere/issues/583)

 ### Metering

 - [ ] Support for viewing resource consumption at the cluster, workspace, and application template levels [#3062](https://github.com/kubesphere/kubesphere/issues/3062)

+### MultiCluster:
+
+- [x] Validate member cluster configuration if the member cluster config (e.g. jwtSecret) is not the same as the host cluster's, which can reduce the complexity of joining clusters. [#3232](https://github.com/kubesphere/kubesphere/issues/3232)
+- [x] Support configuring the cluster controller resync period; some users need to update the cluster components more quickly. [#3213](https://github.com/kubesphere/kubesphere/issues/3213)
+- [x] Lightweight member cluster installation: support running KubeSphere without Redis, LDAP, and so on. [#3056](https://github.com/kubesphere/kubesphere/issues/3056)
+- [x] Refactor the cluster controller (it should update the cluster.status field instead of the tower server when using a proxy connection). [#3234](https://github.com/kubesphere/kubesphere/issues/3234)
+- [ ] Support running the tower server and agent highly available (server with a leader election option and more than one agent). [#31](https://github.com/kubesphere/tower/issues/31)
+
 ## **Upgrade:**

 - [x] Upgrade Istio from 1.4.8 => 1.6.10 [#3236](https://github.com/kubesphere/kubesphere/issues/3236)
@@ -122,16 +127,10 @@ KubeSphere Roadmap demonstrates a list of open source product development plans
 - [x] Using human-readable error messages for pipeline cron text. [#2919](https://github.com/kubesphere/kubesphere/issues/2919)
 - [ ] Using human-readable error messages for S2I. [#140](https://github.com/kubesphere/s2ioperator/issues/140)

-### MultiCluster:
-
-- [x] Validate member cluster configuration if the member cluster config (e.g. jwtSecret) is not the same as the host cluster's, which can reduce the complexity of joining clusters. [#3232](https://github.com/kubesphere/kubesphere/issues/3232)
-- [ ] Support configuring the cluster controller resync period; some users need to update the cluster components more quickly. [#3213](https://github.com/kubesphere/kubesphere/issues/3213)
-- [x] Lightweight member cluster installation: support running KubeSphere without Redis, LDAP, and so on. [#3056](https://github.com/kubesphere/kubesphere/issues/3056)
-- [ ] The cluster controller updates the cluster.status field instead of the tower server when using a proxy connection. [#3234](https://github.com/kubesphere/kubesphere/issues/3234)
-- [ ] Support running the tower server and agent highly available (server with a leader election option and more than one agent). [#31](https://github.com/kubesphere/tower/issues/31)
+- [ ] Support adding member clusters to an existing project (ns), which can be helpful when new clusters join. [#3246](https://github.com/kubesphere/kubesphere/issues/3246)

 ### Observability

 - [ ] Upgrade to Prometheus v2.25.0
 - [x] Upgrade Notification Manager to v0.7.0+ [Releases](https://github.com/kubesphere/notification-manager/releases)
 - [x] Upgrade FluentBit Operator to v0.3.0+ [Releases](https://github.com/kubesphere/fluentbit-operator/releases)
 - [ ] Upgrade FluentBit to v1.6.9+
go.mod (33 changes)
@@ -43,9 +43,11 @@ require (
 	github.com/google/go-cmp v0.5.0
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/websocket v1.4.1
+	github.com/hashicorp/golang-lru v0.5.4
 	github.com/json-iterator/go v1.1.10
+	github.com/jszwec/csvutil v1.5.0
 	github.com/kelseyhightower/envconfig v1.4.0 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0
 	github.com/kubesphere/sonargo v0.0.2
 	github.com/mitchellh/mapstructure v1.2.2
@@ -56,7 +58,7 @@ require (
 	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
 	github.com/pkg/errors v0.9.1
 	github.com/projectcalico/kube-controllers v3.8.8+incompatible
-	github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce
+	github.com/projectcalico/libcalico-go v1.7.2-0.20191014160346-2382c6cdd056
 	github.com/prometheus-community/prom-label-proxy v0.2.0
 	github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a
 	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1
@@ -70,12 +72,14 @@ require (
 	github.com/spf13/viper v1.4.0
 	github.com/stretchr/testify v1.6.1
 	github.com/xanzy/ssh-agent v0.2.1 // indirect
-	golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
+	golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
 	golang.org/x/net v0.0.0-20200707034311-ab3426394381
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
 	google.golang.org/grpc v1.30.0
 	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
+	gopkg.in/cas.v2 v2.2.0
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/square/go-jose.v2 v2.4.0
 	gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect
 	gopkg.in/src-d/go-git.v4 v4.11.0
@@ -190,6 +194,7 @@ replace (
 	github.com/brancz/kube-rbac-proxy => github.com/brancz/kube-rbac-proxy v0.5.0
 	github.com/bshuster-repo/logrus-logstash-hook => github.com/bshuster-repo/logrus-logstash-hook v0.4.1
 	github.com/bugsnag/bugsnag-go => github.com/bugsnag/bugsnag-go v1.5.0
+	github.com/bugsnag/osext => github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b
 	github.com/bugsnag/panicwrap => github.com/bugsnag/panicwrap v1.2.0
 	github.com/c-bata/go-prompt => github.com/c-bata/go-prompt v0.2.2
 	github.com/campoy/embedmd => github.com/campoy/embedmd v1.0.0
@@ -208,6 +213,7 @@ replace (
 	github.com/circonus-labs/circonus-gometrics => github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
 	github.com/circonus-labs/circonusllhist => github.com/circonus-labs/circonusllhist v0.1.3
 	github.com/clbanning/x2j => github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec
+	github.com/cloudflare/cfssl => github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
 	github.com/cockroachdb/apd => github.com/cockroachdb/apd v1.1.0
 	github.com/cockroachdb/cockroach-go => github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
 	github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa
@@ -215,6 +221,9 @@ replace (
 	github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.2.0
 	github.com/containerd/containerd => github.com/containerd/containerd v1.3.0
 	github.com/containerd/continuity => github.com/containerd/continuity v0.0.0-20181203112020-004b46473808
+	github.com/containerd/fifo => github.com/containerd/fifo v0.0.0-20210129194248-f8e8fdba47ef
+	github.com/containerd/ttrpc => github.com/containerd/ttrpc v1.0.2
+	github.com/containerd/typeurl => github.com/containerd/typeurl v1.0.1
 	github.com/containernetworking/cni => github.com/containernetworking/cni v0.8.0
 	github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.3
 	github.com/coreos/etcd => github.com/coreos/etcd v3.3.17+incompatible
@@ -248,16 +257,21 @@ replace (
 	github.com/dhui/dktest => github.com/dhui/dktest v0.3.0
 	github.com/disintegration/imaging => github.com/disintegration/imaging v1.6.1
 	github.com/docker/cli => github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d
+	github.com/docker/compose-on-kubernetes => github.com/docker/compose-on-kubernetes v0.4.24
 	github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/docker => github.com/docker/engine v1.4.2-0.20190822205725-ed20165a37b4
 	github.com/docker/docker-credential-helpers => github.com/docker/docker-credential-helpers v0.6.1
+	github.com/docker/go => github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c
 	github.com/docker/go-connections => github.com/docker/go-connections v0.4.0
+	github.com/docker/go-events => github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
 	github.com/docker/go-metrics => github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82
 	github.com/docker/go-units => github.com/docker/go-units v0.4.0
+	github.com/docker/libtrust => github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
 	github.com/docker/spdystream => github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c
+	github.com/docker/swarmkit => github.com/docker/swarmkit v1.12.0
 	github.com/docopt/docopt-go => github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
 	github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.0
 	github.com/dvsekhvalnov/jose2go => github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae
 	github.com/eapache/go-resiliency => github.com/eapache/go-resiliency v1.1.0
 	github.com/eapache/go-xerial-snappy => github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
 	github.com/eapache/queue => github.com/eapache/queue v1.1.0
@@ -361,6 +375,7 @@ replace (
 	github.com/gomodule/redigo => github.com/gomodule/redigo v2.0.0+incompatible
 	github.com/google/addlicense => github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76
 	github.com/google/btree => github.com/google/btree v1.0.0
+	github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93
 	github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0
 	github.com/google/go-cmp => github.com/google/go-cmp v0.4.0
 	github.com/google/go-github => github.com/google/go-github v17.0.0+incompatible
@@ -370,6 +385,7 @@ replace (
 	github.com/google/martian => github.com/google/martian v2.1.0+incompatible
 	github.com/google/pprof => github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a
 	github.com/google/renameio => github.com/google/renameio v0.1.0
+	github.com/google/shlex => github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/google/uuid => github.com/google/uuid v1.1.1
 	github.com/googleapis/gax-go => github.com/googleapis/gax-go v2.0.2+incompatible
 	github.com/googleapis/gax-go/v2 => github.com/googleapis/gax-go/v2 v2.0.5
@@ -438,6 +454,7 @@ replace (
 	github.com/jstemmer/go-junit-report => github.com/jstemmer/go-junit-report v0.9.1
 	github.com/jsternberg/zap-logfmt => github.com/jsternberg/zap-logfmt v1.0.0
 	github.com/jtolds/gls => github.com/jtolds/gls v4.20.0+incompatible
+	github.com/juju/loggo => github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8
 	github.com/julienschmidt/httprouter => github.com/julienschmidt/httprouter v1.3.0
 	github.com/jung-kurt/gofpdf => github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5
 	github.com/jwilder/encoding => github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef
@@ -489,6 +506,7 @@ replace (
 	github.com/mdlayher/wifi => github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee
 	github.com/mgutz/ansi => github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b
 	github.com/miekg/dns => github.com/miekg/dns v1.1.29
+	github.com/miekg/pkcs11 => github.com/miekg/pkcs11 v1.0.2
 	github.com/minio/md5-simd => github.com/minio/md5-simd v1.1.0
 	github.com/minio/minio-go/v7 => github.com/minio/minio-go/v7 v7.0.2
 	github.com/minio/sha256-simd => github.com/minio/sha256-simd v0.1.1
@@ -518,6 +536,7 @@ replace (
 	github.com/nats-io/nkeys => github.com/nats-io/nkeys v0.1.3
 	github.com/nats-io/nuid => github.com/nats-io/nuid v1.0.1
 	github.com/ncw/swift => github.com/ncw/swift v1.0.50
+	github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
 	github.com/nxadm/tail => github.com/nxadm/tail v1.4.4
 	github.com/oklog/oklog => github.com/oklog/oklog v0.3.2
 	github.com/oklog/run => github.com/oklog/run v1.1.0
@@ -530,6 +549,7 @@ replace (
 	github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.0-rc1
 	github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1
 	github.com/opencontainers/runc => github.com/opencontainers/runc v0.1.1
+	github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.2
 	github.com/opentracing-contrib/go-grpc => github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02
 	github.com/opentracing-contrib/go-observer => github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492
 	github.com/opentracing-contrib/go-stdlib => github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9
@@ -560,7 +580,7 @@ replace (
 	github.com/projectcalico/go-yaml => github.com/projectcalico/go-yaml v0.0.0-20161201183616-955bc3e451ef
 	github.com/projectcalico/go-yaml-wrapper => github.com/projectcalico/go-yaml-wrapper v0.0.0-20161127220527-598e54215bee
 	github.com/projectcalico/kube-controllers => github.com/projectcalico/kube-controllers v3.8.8+incompatible
-	github.com/projectcalico/libcalico-go => github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce
+	github.com/projectcalico/libcalico-go => github.com/projectcalico/libcalico-go v1.7.2-0.20191014160346-2382c6cdd056
 	github.com/prometheus-community/prom-label-proxy => github.com/prometheus-community/prom-label-proxy v0.2.0
 	github.com/prometheus-operator/prometheus-operator => github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a
 	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring => github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.42.1
@@ -615,7 +635,9 @@ replace (
 	github.com/streadway/handy => github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a
 	github.com/stretchr/objx => github.com/stretchr/objx v0.2.0
 	github.com/stretchr/testify => github.com/stretchr/testify v1.4.0
+	github.com/syndtr/gocapability => github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	github.com/thanos-io/thanos => github.com/thanos-io/thanos v0.13.1-0.20200910143741-e0b7f7b32e9c
+	github.com/theupdateframework/notary => github.com/theupdateframework/notary v0.7.0
 	github.com/tidwall/pretty => github.com/tidwall/pretty v1.0.0
 	github.com/tinylib/msgp => github.com/tinylib/msgp v1.1.0
 	github.com/tmc/grpc-websocket-proxy => github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5
@@ -669,6 +691,7 @@ replace (
 	golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
 	golang.org/x/sync => golang.org/x/sync v0.0.0-20190423024810-112230192c58
 	golang.org/x/sys => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
+	golang.org/x/term => golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
 	golang.org/x/text => golang.org/x/text v0.3.0
 	golang.org/x/time => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
 	golang.org/x/tools => golang.org/x/tools v0.0.0-20190710153321-831012c29e42
@@ -687,6 +710,7 @@ replace (
 	gopkg.in/alexcesaro/quotedprintable.v3 => gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc
 	gopkg.in/asn1-ber.v1 => gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d
+	gopkg.in/cas.v2 => gopkg.in/cas.v2 v2.2.0
 	gopkg.in/cenkalti/backoff.v2 => gopkg.in/cenkalti/backoff.v2 v2.2.1
 	gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
 	gopkg.in/cheggaaa/pb.v1 => gopkg.in/cheggaaa/pb.v1 v1.0.25
 	gopkg.in/errgo.v2 => gopkg.in/errgo.v2 v2.1.0
@@ -702,6 +726,7 @@ replace (
 	gopkg.in/ini.v1 => gopkg.in/ini.v1 v1.57.0
 	gopkg.in/mail.v2 => gopkg.in/mail.v2 v2.3.1
 	gopkg.in/natefinch/lumberjack.v2 => gopkg.in/natefinch/lumberjack.v2 v2.0.0
+	gopkg.in/rethinkdb/rethinkdb-go.v6 => gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1
 	gopkg.in/square/go-jose.v1 => gopkg.in/square/go-jose.v1 v1.1.2
 	gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.4.0
 	gopkg.in/src-d/go-billy.v4 => gopkg.in/src-d/go-billy.v4 v4.3.0
@@ -710,7 +735,6 @@ replace (
 	gopkg.in/tchap/go-patricia.v2 => gopkg.in/tchap/go-patricia.v2 v2.2.6
 	gopkg.in/tomb.v1 => gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
 	gopkg.in/warnings.v0 => gopkg.in/warnings.v0 v0.1.2
-	gopkg.in/yaml.v1 => gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0
 	gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.3.0
 	gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
 	gotest.tools => gotest.tools v2.2.0+incompatible
@@ -736,7 +760,6 @@ replace (
 	k8s.io/kubectl => k8s.io/kubectl v0.18.6
 	k8s.io/metrics => k8s.io/metrics v0.18.6
 	k8s.io/utils => k8s.io/utils v0.0.0-20200603063816-c1c6865ac451
-
 	kubesphere.io/client-go => ./staging/src/kubesphere.io/client-go
 	kubesphere.io/im => kubesphere.io/im v0.1.0
 	openpitrix.io/iam => openpitrix.io/iam v0.1.0
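Among the new direct dependencies, github.com/jszwec/csvutil is the one most clearly tied to this branch — presumably for exporting metering data as CSV (an assumption; the metering code itself is not in this excerpt). Its core API marshals a slice of tagged structs:

```go
package main

import (
	"fmt"

	"github.com/jszwec/csvutil"
)

// meteringRow is a hypothetical record shape; the real metering types
// are not part of this diff.
type meteringRow struct {
	Namespace string  `csv:"namespace"`
	CPUHours  float64 `csv:"cpu_hours"`
}

func main() {
	rows := []meteringRow{{Namespace: "default", CPUHours: 0.42}}
	out, err := csvutil.Marshal(rows)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // namespace,cpu_hours\ndefault,0.42
}
```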
go.sum (15 changes; the viewer's add/remove markers were lost for some of these hunks, so lines without a marker below may be context or additions)
@@ -130,11 +130,14 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -172,14 +175,17 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d h1:qdD+BtyCE1XXpDyhvn0yZVcZOLILdj9Cw4pKu0kQbPQ=
github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/engine v1.4.2-0.20190822205725-ed20165a37b4 h1:+VAGRKyn9Ca+ckzV/PJsaRO7UXO9KQjFmSffcSDrWdE=
github.com/docker/engine v1.4.2-0.20190822205725-ed20165a37b4/go.mod h1:3CPr2caMgTHxxIAZgEMd3uLYPDlRvPqCpyeRf6ncPcY=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA=
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -311,6 +317,7 @@ github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6/go.mod h1:K/9g3pPouf13
 github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0 h1:kFkMAZBNAn4j7K0GiZr8cRYzejq68VbheufiV3YuyFI=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -386,6 +393,7 @@ github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR3
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -556,6 +564,7 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
@@ -598,8 +607,8 @@ github.com/projectcalico/go-yaml-wrapper v0.0.0-20161127220527-598e54215bee h1:y
 github.com/projectcalico/go-yaml-wrapper v0.0.0-20161127220527-598e54215bee/go.mod h1:UgC0aTQ2KMDxlX3lU/stndk7DMUBJqzN40yFiILHgxc=
 github.com/projectcalico/kube-controllers v3.8.8+incompatible h1:ZbCg0wJ+gd7i81CB6vOASiUN//oR4ZBl+wEdy0Vk1uI=
 github.com/projectcalico/kube-controllers v3.8.8+incompatible/go.mod h1:ZEafKeKN5wiNARRw1LZP8l10uEfp04C7redU848MMZw=
-github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce h1:O/R67iwUe8TvZwgKbDB2cvF2/8L8PR4zVOcBtYEHD5Y=
-github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce/go.mod h1:z4tuFqrAg/423AMSaDamY5LgqeOZ5ETui6iOxDwJ/ag=
+github.com/projectcalico/libcalico-go v1.7.2-0.20191014160346-2382c6cdd056 h1:qs29Hus4cY8XlsmMLUsSAHT0metSTyqu2Tnpuwy5dkM=
+github.com/projectcalico/libcalico-go v1.7.2-0.20191014160346-2382c6cdd056/go.mod h1:tUt8rirmysRy7TR1S80XDriwBK1z2igwwX79lnUrSf4=
 github.com/prometheus-community/prom-label-proxy v0.2.0 h1:2cNKhNjbTmmEDvBTW/6WUsE2x7bh76rBMZVBn4ey6To=
 github.com/prometheus-community/prom-label-proxy v0.2.0/go.mod h1:XdjyZg7LCbCC5FADHtpgNp6kQ0W9beXVGfmcvndMj5Y=
 github.com/prometheus-operator/prometheus-operator v0.42.2-0.20200928114327-fbd01683839a h1:21yBrtc90hdEhJaL815CHNV0GW0DEdgxDOiL5OYoSHo=
@@ -814,7 +823,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
@@ -893,4 +901,5 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnM
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc h1:MksmcCZQWAQJCTA5T0jgI/0sJ51AVm4Z41MrmfczEoc=
 vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
hack/deploy-kubesphere.sh (new executable file, 56 lines)
@@ -0,0 +1,56 @@
#!/usr/bin/env bash

# Copyright 2020 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

function wait_for_installation_finish() {
  echo "waiting for ks-installer pod ready"
  kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready $(kubectl -n kubesphere-system get pod -l app=ks-install -oname)
  echo "waiting for KubeSphere ready"
  while IFS= read -r line; do
    if [[ $line =~ "Welcome to KubeSphere" ]]
    then
      break
    fi
  done < <(timeout 900 kubectl logs -n kubesphere-system deploy/ks-installer -f)
}

# Use kubespheredev and latest tag as the default image
TAG="${TAG:-latest}"
REPO="${REPO:-kubespheredev}"

# Use KIND_LOAD_IMAGE=y hack/deploy-kubesphere.sh to load
# the built docker images into kind before deploying.
if [[ "${KIND_LOAD_IMAGE:-}" == "y" ]]; then
  kind load docker-image "$REPO/ks-apiserver:$TAG" --name="${KIND_CLUSTER_NAME:-kind}"
  kind load docker-image "$REPO/ks-controller-manager:$TAG" --name="${KIND_CLUSTER_NAME:-kind}"
fi

# Download the latest ks-installer to deploy KubeSphere
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml

# TODO: override ks-apiserver and ks-controller-manager images with a specific tag

kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

wait_for_installation_finish

# Expose KubeSphere API Server
kubectl -n kubesphere-system patch svc ks-apiserver -p '{"spec":{"type":"NodePort","ports":[{"name":"ks-apiserver","port":80,"protocol":"TCP","targetPort":9090,"nodePort":30881}]}}'
@@ -2,7 +2,7 @@

 set -e

-GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 tenant:v1alpha2 devops:v1alpha1 iam:v1alpha2 devops:v1alpha3 cluster:v1alpha1 storage:v1alpha1 auditing:v1alpha1 types:v1beta1"
+GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 tenant:v1alpha2 devops:v1alpha1 iam:v1alpha2 devops:v1alpha3 cluster:v1alpha1 storage:v1alpha1 auditing:v1alpha1 types:v1beta1 quota:v1alpha2"

 rm -rf ./pkg/client
 ./hack/generate_group.sh "client,lister,informer" kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "$GV" --output-base=./ -h "$PWD/hack/boilerplate.go.txt"
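Adding quota:v1alpha2 to GV makes the generator emit typed clients, listers, and informers for the new quota API group. Assuming the generator's usual naming layout (the generated code itself is not shown in this diff), the resulting informers would be consumed like the existing ones:

```go
// Hypothetical: following the convention visible elsewhere in this commit
// (e.g. kubesphereInformer.Network().V1alpha1().IPPools()).
informer := kubesphereInformer.Quota().V1alpha2().ResourceQuotas().Informer()
_ = informer
```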
@@ -184,5 +184,12 @@ go mod vendor >>"${LOG_FILE}" 2>&1
 awk '{if($1=="#") print $2 " " $0; else print}' < vendor/modules.txt | sort -k1,1 -s | sed 's/.*#/#/' > "${TMP_DIR}/modules.txt.tmp"
 mv "${TMP_DIR}/modules.txt.tmp" vendor/modules.txt

+# Create a symlink in the vendor directory pointing to the staging components.
+# This lets other packages and tools use the local staging components as if they were vendored.
+for repo in $(kube::util::list_staging_repos); do
+  rm -fr "${KUBE_ROOT}/vendor/kubesphere.io/${repo}"
+  ln -s "../../staging/src/kubesphere.io/${repo}" "${KUBE_ROOT}/vendor/kubesphere.io/${repo}"
+done
+
 #kube::log::status "vendor: updating LICENSES file"
 #hack/update-vendor-licenses.sh >>"${LOG_FILE}" 2>&1
336
kube/pkg/api/v1/pod/util.go
Normal file
336
kube/pkg/api/v1/pod/util.go
Normal file
@@ -0,0 +1,336 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"kubesphere.io/kubesphere/kube/pkg/features"
)

// FindPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
	portName := svcPort.TargetPort
	switch portName.Type {
	case intstr.String:
		name := portName.StrVal
		for _, container := range pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.Name == name && port.Protocol == svcPort.Protocol {
					return int(port.ContainerPort), nil
				}
			}
		}
	case intstr.Int:
		return portName.IntValue(), nil
	}

	return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}

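// Illustrative sketch (hypothetical values, not part of this commit):
// resolving a named targetPort with FindPort.
func exampleFindPort() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: v1.ProtocolTCP}},
	}}}}
	svcPort := &v1.ServicePort{Protocol: v1.ProtocolTCP, TargetPort: intstr.FromString("http")}
	port, err := FindPort(pod, svcPort) // port == 8080, err == nil
	fmt.Println(port, err)
}
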
// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
type ContainerVisitor func(container *v1.Container) (shouldContinue bool)

// VisitContainers invokes the visitor function with a pointer to the container
// spec of every container in the given pod spec. If visitor returns false,
// visiting is short-circuited. VisitContainers returns true if visiting completes,
// false if visiting was short-circuited.
func VisitContainers(podSpec *v1.PodSpec, visitor ContainerVisitor) bool {
	for i := range podSpec.InitContainers {
		if !visitor(&podSpec.InitContainers[i]) {
			return false
		}
	}
	for i := range podSpec.Containers {
		if !visitor(&podSpec.Containers[i]) {
			return false
		}
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		for i := range podSpec.EphemeralContainers {
			if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon)) {
				return false
			}
		}
	}
	return true
}

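// Illustrative sketch (hypothetical helper, not part of this commit):
// driving VisitContainers to collect every image referenced by a pod spec.
func podImages(spec *v1.PodSpec) []string {
	var images []string
	VisitContainers(spec, func(c *v1.Container) bool {
		images = append(images, c.Image)
		return true // never short-circuit; walk every container
	})
	return images
}
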
// Visitor is called with each object name, and returns true if visiting should continue
type Visitor func(name string) (shouldContinue bool)

// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
	for _, reference := range pod.Spec.ImagePullSecrets {
		if !visitor(reference.Name) {
			return false
		}
	}
	VisitContainers(&pod.Spec, func(c *v1.Container) bool {
		return visitContainerSecretNames(c, visitor)
	})
	var source *v1.VolumeSource

	for i := range pod.Spec.Volumes {
		source = &pod.Spec.Volumes[i].VolumeSource
		switch {
		case source.AzureFile != nil:
			if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) {
				return false
			}
		case source.CephFS != nil:
			if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
				return false
			}
		case source.Cinder != nil:
			if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
				return false
			}
		case source.FlexVolume != nil:
			if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
				return false
			}
		case source.Projected != nil:
			for j := range source.Projected.Sources {
				if source.Projected.Sources[j].Secret != nil {
					if !visitor(source.Projected.Sources[j].Secret.Name) {
						return false
					}
				}
			}
		case source.RBD != nil:
			if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) {
				return false
			}
		case source.Secret != nil:
			if !visitor(source.Secret.SecretName) {
				return false
			}
		case source.ScaleIO != nil:
			if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) {
				return false
			}
		case source.ISCSI != nil:
			if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) {
				return false
			}
		case source.StorageOS != nil:
			if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) {
				return false
			}
		case source.CSI != nil:
			if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) {
				return false
			}
		}
	}
	return true
}

func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
	for _, env := range container.EnvFrom {
		if env.SecretRef != nil {
			if !visitor(env.SecretRef.Name) {
				return false
			}
		}
	}
	for _, envVar := range container.Env {
		if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil {
			if !visitor(envVar.ValueFrom.SecretKeyRef.Name) {
				return false
			}
		}
	}
	return true
}

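// Illustrative sketch (hypothetical helper, not part of this commit):
// collecting the distinct secret names a pod references.
func podSecretNames(pod *v1.Pod) map[string]bool {
	names := map[string]bool{}
	VisitPodSecretNames(pod, func(name string) bool {
		names[name] = true
		return true
	})
	return names
}
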
// VisitPodConfigmapNames invokes the visitor function with the name of every configmap
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
	VisitContainers(&pod.Spec, func(c *v1.Container) bool {
		return visitContainerConfigmapNames(c, visitor)
	})
	var source *v1.VolumeSource
	for i := range pod.Spec.Volumes {
		source = &pod.Spec.Volumes[i].VolumeSource
		switch {
		case source.Projected != nil:
			for j := range source.Projected.Sources {
				if source.Projected.Sources[j].ConfigMap != nil {
					if !visitor(source.Projected.Sources[j].ConfigMap.Name) {
						return false
					}
				}
			}
		case source.ConfigMap != nil:
			if !visitor(source.ConfigMap.Name) {
				return false
			}
		}
	}
	return true
}

func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool {
	for _, env := range container.EnvFrom {
		if env.ConfigMapRef != nil {
			if !visitor(env.ConfigMapRef.Name) {
				return false
			}
		}
	}
	for _, envVar := range container.Env {
		if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil {
			if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) {
				return false
			}
		}
	}
	return true
}

// GetContainerStatus extracts the status of container "name" from "statuses".
// It also returns if "name" exists.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
	for i := range statuses {
		if statuses[i].Name == name {
			return statuses[i], true
		}
	}
	return v1.ContainerStatus{}, false
}

// GetExistingContainerStatus extracts the status of container "name" from "statuses",
// returning an empty status if "name" does not exist.
func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus {
	status, _ := GetContainerStatus(statuses, name)
	return status
}

// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
	if !IsPodReady(pod) {
		return false
	}

	c := GetPodReadyCondition(pod.Status)
	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
	if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
		return true
	}
	return false
}

// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *v1.Pod) bool {
	return IsPodReadyConditionTrue(pod.Status)
}

// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
	condition := GetPodReadyCondition(status)
	return condition != nil && condition.Status == v1.ConditionTrue
}

// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
	_, condition := GetPodCondition(&status, v1.PodReady)
	return condition
}

// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns the index of the located condition and a pointer to it, or -1 and nil if the
// condition is not present.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if status == nil {
		return -1, nil
	}
	return GetPodConditionFromList(status.Conditions, conditionType)
}

// GetPodConditionFromList extracts the provided condition from the given list of conditions and
// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if conditions == nil {
		return -1, nil
	}
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return i, &conditions[i]
		}
	}
	return -1, nil
}

// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
// status has changed.
// Returns true if pod condition has changed or has been added.
func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
	condition.LastTransitionTime = metav1.Now()
	// Try to find this pod condition.
	conditionIndex, oldCondition := GetPodCondition(status, condition.Type)

	if oldCondition == nil {
		// We are adding new pod condition.
		status.Conditions = append(status.Conditions, *condition)
		return true
	}
	// We are updating an existing condition, so we need to check if it has changed.
	if condition.Status == oldCondition.Status {
		condition.LastTransitionTime = oldCondition.LastTransitionTime
	}

	isEqual := condition.Status == oldCondition.Status &&
		condition.Reason == oldCondition.Reason &&
		condition.Message == oldCondition.Message &&
		condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
		condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)

	status.Conditions[conditionIndex] = *condition
	// Return true if one of the fields has changed.
	return !isEqual
}

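// Illustrative sketch (hypothetical helper and reason string, not part of this
// commit): markReady reports whether the Ready condition actually changed; it
// returns false when the pod was already Ready with identical fields.
func markReady(pod *v1.Pod) bool {
	return UpdatePodCondition(&pod.Status, &v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionTrue,
		Reason: "ContainersReady", // hypothetical reason for the sketch
	})
}
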
// GetPodPriority returns priority of the given pod.
func GetPodPriority(pod *v1.Pod) int32 {
	if pod.Spec.Priority != nil {
		return *pod.Spec.Priority
	}
	// When priority of a running pod is nil, it means it was created at a time
	// that there was no global default priority class and the priority class
	// name of the pod was empty. So, we resolve to the static default priority.
	return 0
}
51
kube/pkg/apis/core/helper/helpers.go
Normal file
@@ -0,0 +1,51 @@
/*

Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

package helper

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

// Semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
var Semantic = conversion.EqualitiesOrDie(
	func(a, b resource.Quantity) bool {
		// Ignore formatting, only care that numeric value stayed the same.
		// TODO: if we decide it's important, it should be safe to start comparing the format.
		//
		// Uninitialized quantities are equivalent to 0 quantities.
		return a.Cmp(b) == 0
	},
	func(a, b metav1.MicroTime) bool {
		return a.UTC() == b.UTC()
	},
	func(a, b metav1.Time) bool {
		return a.UTC() == b.UTC()
	},
	func(a, b labels.Selector) bool {
		return a.String() == b.String()
	},
	func(a, b fields.Selector) bool {
		return a.String() == b.String()
	},
)

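// Illustrative sketch (not part of this commit, assumes "fmt" is imported):
// what Semantic buys over plain reflect.DeepEqual is that textually different
// but numerically equal quantities compare equal.
func exampleSemantic() {
	a := resource.MustParse("1000m")
	b := resource.MustParse("1")
	fmt.Println(Semantic.DeepEqual(a, b)) // true: 1000m and 1 are the same value
}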
500
kube/pkg/apis/core/v1/helper/helpers.go
Normal file
@@ -0,0 +1,500 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package helper

import (
	"encoding/json"
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/apimachinery/pkg/util/validation"
	"kubesphere.io/kubesphere/kube/pkg/apis/core/helper"
)

// IsExtendedResourceName returns true if:
// 1. the resource name is not in the default namespace;
// 2. resource name does not have "requests." prefix,
// to avoid confusion with the convention in quota
// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name
func IsExtendedResourceName(name v1.ResourceName) bool {
	if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) {
		return false
	}
	// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
	nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
	if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
		return false
	}
	return true
}

// IsPrefixedNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace.
func IsPrefixedNativeResource(name v1.ResourceName) bool {
	return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix)
}

// IsNativeResource returns true if the resource name is in the
// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are
// implicitly in the kubernetes.io/ namespace.
func IsNativeResource(name v1.ResourceName) bool {
	return !strings.Contains(string(name), "/") ||
		IsPrefixedNativeResource(name)
}

// IsHugePageResourceName returns true if the resource name has the huge page
// resource prefix.
func IsHugePageResourceName(name v1.ResourceName) bool {
	return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}

// HugePageResourceName returns a ResourceName with the canonical hugepage
// prefix prepended for the specified page size. The page size is converted
// to its canonical representation.
func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName {
	return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String()))
}

// HugePageSizeFromResourceName returns the page size for the specified huge page
// resource name. If the specified input is not a valid huge page resource name
// an error is returned.
func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) {
	if !IsHugePageResourceName(name) {
		return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
	}
	pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix)
	return resource.ParseQuantity(pageSize)
}

// IsOvercommitAllowed returns true if the resource is in the default
// namespace and is not hugepages.
func IsOvercommitAllowed(name v1.ResourceName) bool {
	return IsNativeResource(name) &&
		!IsHugePageResourceName(name)
}

// IsAttachableVolumeResourceName returns true if the resource name has the
// attachable volume prefix.
func IsAttachableVolumeResourceName(name v1.ResourceName) bool {
	return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix)
}

// IsScalarResourceName returns true for Extended, Hugepages, prefixed-native,
// and AttachableVolume resources.
func IsScalarResourceName(name v1.ResourceName) bool {
	return IsExtendedResourceName(name) || IsHugePageResourceName(name) ||
		IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name)
}

// IsServiceIPSet checks if the service's ClusterIP is set or not;
// the objective is not to perform validation here.
func IsServiceIPSet(service *v1.Service) bool {
	return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}

// LoadBalancerStatusEqual compares two LoadBalancerStatus values for equality.
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool {
	return ingressSliceEqual(l.Ingress, r.Ingress)
}

func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	for i := range lhs {
		if !ingressEqual(&lhs[i], &rhs[i]) {
			return false
		}
	}
	return true
}

func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool {
	if lhs.IP != rhs.IP {
		return false
	}
	if lhs.Hostname != rhs.Hostname {
		return false
	}
	return true
}

// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX.
func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {
	modes = removeDuplicateAccessModes(modes)
	modesStr := []string{}
	if containsAccessMode(modes, v1.ReadWriteOnce) {
		modesStr = append(modesStr, "RWO")
	}
	if containsAccessMode(modes, v1.ReadOnlyMany) {
		modesStr = append(modesStr, "ROX")
	}
	if containsAccessMode(modes, v1.ReadWriteMany) {
		modesStr = append(modesStr, "RWX")
	}
	return strings.Join(modesStr, ",")
}

// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode {
	strmodes := strings.Split(modes, ",")
	accessModes := []v1.PersistentVolumeAccessMode{}
	for _, s := range strmodes {
		s = strings.Trim(s, " ")
		switch {
		case s == "RWO":
			accessModes = append(accessModes, v1.ReadWriteOnce)
		case s == "ROX":
			accessModes = append(accessModes, v1.ReadOnlyMany)
		case s == "RWX":
			accessModes = append(accessModes, v1.ReadWriteMany)
		}
	}
	return accessModes
}

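// Illustrative sketch (hypothetical, not part of this commit): a round trip
// through the access-mode helpers; duplicates are dropped and the output
// order is the canonical RWO,ROX,RWX.
func exampleAccessModes() {
	s := GetAccessModesAsString([]v1.PersistentVolumeAccessMode{
		v1.ReadWriteMany, v1.ReadWriteOnce, v1.ReadWriteOnce,
	})
	// s == "RWO,RWX"
	modes := GetAccessModesFromString(s)
	_ = modes // [ReadWriteOnce ReadWriteMany]
}
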
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode {
	accessModes := []v1.PersistentVolumeAccessMode{}
	for _, m := range modes {
		if !containsAccessMode(accessModes, m) {
			accessModes = append(accessModes, m)
		}
	}
	return accessModes
}

func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
	for _, m := range modes {
		if m == mode {
			return true
		}
	}
	return false
}

// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
	if len(nsm) == 0 {
		return labels.Nothing(), nil
	}
	selector := labels.NewSelector()
	for _, expr := range nsm {
		var op selection.Operator
		switch expr.Operator {
		case v1.NodeSelectorOpIn:
			op = selection.In
		case v1.NodeSelectorOpNotIn:
			op = selection.NotIn
		case v1.NodeSelectorOpExists:
			op = selection.Exists
		case v1.NodeSelectorOpDoesNotExist:
			op = selection.DoesNotExist
		case v1.NodeSelectorOpGt:
			op = selection.GreaterThan
		case v1.NodeSelectorOpLt:
			op = selection.LessThan
		default:
			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
		}
		r, err := labels.NewRequirement(expr.Key, op, expr.Values)
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}

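// Illustrative sketch (hypothetical key/values, not part of this commit):
// converting a node selector requirement and matching it against node labels.
func exampleNodeSelector() {
	sel, err := NodeSelectorRequirementsAsSelector([]v1.NodeSelectorRequirement{{
		Key:      "kubernetes.io/os",
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"linux"},
	}})
	if err == nil {
		_ = sel.Matches(labels.Set{"kubernetes.io/os": "linux"}) // true
	}
}
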
// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements
// fields.Selector.
func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) {
	if len(nsm) == 0 {
		return fields.Nothing(), nil
	}

	selectors := []fields.Selector{}
	for _, expr := range nsm {
		switch expr.Operator {
		case v1.NodeSelectorOpIn:
			if len(expr.Values) != 1 {
				return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q",
					len(expr.Values), expr.Operator)
			}
			selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0]))

		case v1.NodeSelectorOpNotIn:
			if len(expr.Values) != 1 {
				return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q",
					len(expr.Values), expr.Operator)
			}
			selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0]))

		default:
			return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator)
		}
	}

	return fields.AndSelectors(selectors...), nil
}

// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms
func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool {
	for _, req := range reqs {
		for _, term := range terms {
			for _, r := range term.MatchExpressions {
				if r.Key == req.Key {
					return true
				}
			}
		}
	}
	return false
}

// MatchNodeSelectorTerms checks whether the node labels and fields match the ORed node selector terms;
// nil or empty term matches no objects.
func MatchNodeSelectorTerms(
	nodeSelectorTerms []v1.NodeSelectorTerm,
	nodeLabels labels.Set,
	nodeFields fields.Set,
) bool {
	for _, req := range nodeSelectorTerms {
		// nil or empty term selects no objects
		if len(req.MatchExpressions) == 0 && len(req.MatchFields) == 0 {
			continue
		}

		if len(req.MatchExpressions) != 0 {
			labelSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
			if err != nil || !labelSelector.Matches(nodeLabels) {
				continue
			}
		}

		if len(req.MatchFields) != 0 {
			fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(req.MatchFields)
			if err != nil || !fieldSelector.Matches(nodeFields) {
				continue
			}
		}

		return true
	}

	return false
}

// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct
// that implements labels.Selector.
func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) {
	if len(tsm) == 0 {
		return labels.Nothing(), nil
	}

	selector := labels.NewSelector()
	for _, expr := range tsm {
		r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values)
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}

	return selector, nil
}

// MatchTopologySelectorTerms checks whether the given labels match the ORed topology selector terms;
// nil or empty term matches no objects; while empty term list matches all objects.
func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool {
	if len(topologySelectorTerms) == 0 {
		// empty term list matches all objects
		return true
	}

	for _, req := range topologySelectorTerms {
		// nil or empty term selects no objects
		if len(req.MatchLabelExpressions) == 0 {
			continue
		}

		labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions)
		if err != nil || !labelSelector.Matches(lbls) {
			continue
		}

		return true
	}

	return false
}

// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool {
	podTolerations := spec.Tolerations

	var newTolerations []v1.Toleration
	updated := false
	for i := range podTolerations {
		if toleration.MatchToleration(&podTolerations[i]) {
			if helper.Semantic.DeepEqual(toleration, podTolerations[i]) {
				return false
			}
			newTolerations = append(newTolerations, *toleration)
			updated = true
			continue
		}

		newTolerations = append(newTolerations, podTolerations[i])
	}

	if !updated {
		newTolerations = append(newTolerations, *toleration)
	}

	spec.Tolerations = newTolerations
	return true
}

// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool {
	return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
}

// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
	for i := range tolerations {
		if tolerations[i].ToleratesTaint(taint) {
			return true
		}
	}
	return false
}

type taintsFilterFunc func(*v1.Taint) bool

// TolerationsTolerateTaintsWithFilter checks if the given tolerations tolerate
// all the taints in the given taint list that pass the filter.
func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool {
	if len(taints) == 0 {
		return true
	}

	for i := range taints {
		if applyFilter != nil && !applyFilter(&taints[i]) {
			continue
		}

		if !TolerationsTolerateTaint(tolerations, &taints[i]) {
			return false
		}
	}

	return true
}

// GetMatchingTolerations returns true and a list of Tolerations matching all Taints if all are tolerated, or false otherwise.
func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) {
	if len(taints) == 0 {
		return true, []v1.Toleration{}
	}
	if len(tolerations) == 0 && len(taints) > 0 {
		return false, []v1.Toleration{}
	}
	result := []v1.Toleration{}
	for i := range taints {
		tolerated := false
		for j := range tolerations {
			if tolerations[j].ToleratesTaint(&taints[i]) {
				result = append(result, tolerations[j])
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false, []v1.Toleration{}
		}
	}
	return true, result
}

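// Illustrative sketch (hypothetical helper, not part of this commit): checking
// whether a pod's tolerations cover all NoSchedule taints on a node.
func podFitsNodeTaints(pod *v1.Pod, node *v1.Node) bool {
	return TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints,
		func(t *v1.Taint) bool { return t.Effect == v1.TaintEffectNoSchedule })
}
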
// GetAvoidPodsFromNodeAnnotations scans the node annotations and returns the
// pods that should be avoided on this node.
func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) {
	var avoidPods v1.AvoidPods
	if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" {
		err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods)
		if err != nil {
			return avoidPods, err
		}
	}
	return avoidPods, nil
}

// GetPersistentVolumeClass returns StorageClassName.
func GetPersistentVolumeClass(volume *v1.PersistentVolume) string {
	// Use beta annotation first
	if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found {
		return class
	}

	return volume.Spec.StorageClassName
}

// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
// requested, it returns "".
func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
	// Use beta annotation first
	if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
		return class
	}

	if claim.Spec.StorageClassName != nil {
		return *claim.Spec.StorageClassName
	}

	return ""
}

// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements
// labels.Selector.
func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) {
	selector := labels.NewSelector()
	var op selection.Operator
	switch ssr.Operator {
	case v1.ScopeSelectorOpIn:
		op = selection.In
	case v1.ScopeSelectorOpNotIn:
		op = selection.NotIn
	case v1.ScopeSelectorOpExists:
		op = selection.Exists
	case v1.ScopeSelectorOpDoesNotExist:
		op = selection.DoesNotExist
	default:
		return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator)
	}
	r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values)
	if err != nil {
		return nil, err
	}
	selector = selector.Add(*r)
	return selector, nil
}
103
kube/pkg/apis/core/v1/helper/qos/qos.go
Normal file
@@ -0,0 +1,103 @@
/*

Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

package qos

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/sets"
)

var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))

// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[corev1.ResourceName]corev1.PodQOSClass

func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
	return supportedQoSComputeResources.Has(string(name))
}

// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
	requests := corev1.ResourceList{}
	limits := corev1.ResourceList{}
	zeroQuantity := resource.MustParse("0")
	isGuaranteed := true
	allContainers := []corev1.Container{}
	allContainers = append(allContainers, pod.Spec.Containers...)
	allContainers = append(allContainers, pod.Spec.InitContainers...)
	for _, container := range allContainers {
		// process requests
		for name, quantity := range container.Resources.Requests {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				delta := quantity.DeepCopy()
				if _, exists := requests[name]; !exists {
					requests[name] = delta
				} else {
					delta.Add(requests[name])
					requests[name] = delta
				}
			}
		}
		// process limits
		qosLimitsFound := sets.NewString()
		for name, quantity := range container.Resources.Limits {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				qosLimitsFound.Insert(string(name))
				delta := quantity.DeepCopy()
				if _, exists := limits[name]; !exists {
					limits[name] = delta
				} else {
					delta.Add(limits[name])
					limits[name] = delta
				}
			}
		}

		if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
			isGuaranteed = false
		}
	}
	if len(requests) == 0 && len(limits) == 0 {
		return corev1.PodQOSBestEffort
	}
	// Check if requests match limits for all resources.
	if isGuaranteed {
		for name, req := range requests {
			if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
				isGuaranteed = false
				break
			}
		}
	}
	if isGuaranteed &&
		len(requests) == len(limits) {
		return corev1.PodQOSGuaranteed
	}
	return corev1.PodQOSBurstable
}

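// Illustrative sketch (hypothetical values, not part of this commit): a pod
// whose only container sets equal CPU/memory requests and limits is
// classified as Guaranteed.
func exampleQOS() corev1.PodQOSClass {
	rl := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("256Mi"),
	}
	pod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{
		Resources: corev1.ResourceRequirements{Requests: rl, Limits: rl},
	}}}}
	return GetPodQOS(pod) // corev1.PodQOSGuaranteed
}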
680
kube/pkg/features/features.go
Normal file
@@ -0,0 +1,680 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package features

import (
	"k8s.io/component-base/featuregate"
)

const (
	// Every feature gate should add method here following this template:
	//
	// // owner: @username
	// // alpha: v1.X
	// MyFeature featuregate.Feature = "MyFeature"

	// owner: @tallclair
	// beta: v1.4
	AppArmor featuregate.Feature = "AppArmor"

	// owner: @mtaufen
	// alpha: v1.4
	// beta: v1.11
	DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig"

	// owner: @pweil-
	// alpha: v1.5
	//
	// Default userns=host for containers that are using other host namespaces, host mounts, the pod
	// contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
	// SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
	ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"

	// owner: @jiayingz
	// beta: v1.10
	//
	// Enables support for Device Plugins
	DevicePlugins featuregate.Feature = "DevicePlugins"

	// owner: @dxist
	// alpha: v1.16
	//
	// Enables support of HPA scaling to zero pods when an object or custom metric is configured.
	HPAScaleToZero featuregate.Feature = "HPAScaleToZero"

	// owner: @mikedanese
	// alpha: v1.7
	// beta: v1.12
	//
	// Gets a server certificate for the kubelet from the Certificate Signing
	// Request API instead of generating one self signed and auto rotates the
	// certificate as expiration approaches.
	RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"

	// owner: @jinxu
	// beta: v1.10
	//
	// New local storage types to support local storage capacity isolation
	LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"

	// owner: @gnufied
	// beta: v1.11
	// Ability to Expand persistent volumes
	ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"

	// owner: @mlmhl
	// beta: v1.15
	// Ability to expand persistent volumes' file system without unmounting volumes.
	ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes"

	// owner: @gnufied
	// alpha: v1.14
	// beta: v1.16
	// Ability to expand CSI volumes
	ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes"

	// owner: @verb
	// alpha: v1.16
	//
	// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
	EphemeralContainers featuregate.Feature = "EphemeralContainers"

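	// Illustrative usage (hypothetical caller, not part of this commit): gates
	// are consulted at the call site, e.g. the ephemeral-container walk in
	// kube/pkg/api/v1/pod.VisitContainers:
	//
	//   if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
	//       // include podSpec.EphemeralContainers in the walk
	//   }
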
	// owner: @sjenning
	// alpha: v1.11
	//
	// Allows resource reservations at the QoS level preventing pods at lower QoS levels from
	// bursting into resources requested at higher QoS levels (memory only for now)
	QOSReserved featuregate.Feature = "QOSReserved"

	// owner: @ConnorDoyle
	// alpha: v1.8
	// beta: v1.10
	//
	// Alternative container-level CPU affinity policies.
	CPUManager featuregate.Feature = "CPUManager"

	// owner: @szuecs
	// alpha: v1.12
	//
	// Enable nodes to change CPUCFSQuotaPeriod
	CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"

	// owner: @lmdaly
	// alpha: v1.16
	// beta: v1.18
	//
	// Enable resource managers to make NUMA aligned decisions
	TopologyManager featuregate.Feature = "TopologyManager"

	// owner: @sjenning
	// beta: v1.11
	//
	// Enable pods to set sysctls on a pod
	Sysctls featuregate.Feature = "Sysctls"

	// owner: @smarterclayton
	// alpha: v1.16
	// beta: v1.19
	// ga: v1.21
	//
	// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
	// Lock to false in v1.21 and remove in v1.22.
	LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"

	// owner: @brendandburns
	// alpha: v1.9
	// beta: v1.19
	// ga: v1.21
	//
	// Enable nodes to exclude themselves from service load balancers
	ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"

	// owner: @smarterclayton
	// alpha: v1.16
	// beta: v1.19
	// ga: v1.21
	//
	// Enable nodes to exclude themselves from network disruption checks
	NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"

	// owner: @saad-ali
	// alpha: v1.12
	// beta: v1.14
	// GA: v1.18
	// Enable all logic related to the CSIDriver API object in storage.k8s.io
	CSIDriverRegistry featuregate.Feature = "CSIDriverRegistry"

	// owner: @screeley44
	// alpha: v1.9
	// beta: v1.13
	// ga: v1.18
	//
	// Enable Block volume support in containers.
	BlockVolume featuregate.Feature = "BlockVolume"

	// owner: @pospispa
	// GA: v1.11
	//
	// Postpone deletion of a PV or a PVC when they are being used
	StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection"

	// owner: @dims, @derekwaynecarr
	// alpha: v1.10
	// beta: v1.14
	// GA: v1.20
	//
	// Implement support for limiting pids in pods
	SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"

	// owner: @mikedanese
	// alpha: v1.13
	//
	// Migrate ServiceAccount volumes to use a projected volume consisting of a
	// ServiceAccountTokenVolumeProjection. This feature adds new required flags
	// to the API server.
	BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume"

	// owner: @mtaufen
	// alpha: v1.18
	// beta: v1.20
	//
	// Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service
	// account issuer in the API server.
	// Note these endpoints serve minimally-compliant discovery docs that are
	// intended to be used for service account token verification.
	ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery"

	// owner: @Random-Liu
	// beta: v1.11
	//
	// Enable container log rotation for cri container runtime
	CRIContainerLogRotation featuregate.Feature = "CRIContainerLogRotation"

	// owner: @krmayankk
	// beta: v1.14
	//
	// Enables control over the primary group ID of containers' init processes.
	RunAsGroup featuregate.Feature = "RunAsGroup"

	// owner: @saad-ali
	// ga
	//
	// Allow mounting a subpath of a volume in a container
	// Do not remove this feature gate even though it's GA
	VolumeSubpath featuregate.Feature = "VolumeSubpath"

	// owner: @ravig
	// alpha: v1.11
	//
	// Include volume count on node to be considered for balanced resource allocation while scheduling.
	// A node which has closer cpu,memory utilization and volume count is favoured by scheduler
	// while making decisions.
	BalanceAttachedNodeVolumes featuregate.Feature = "BalanceAttachedNodeVolumes"

	// owner: @vladimirvivien
	// alpha: v1.11
	// beta: v1.14
	// ga: v1.18
	//
	// Enables CSI to use raw block storage volumes
	CSIBlockVolume featuregate.Feature = "CSIBlockVolume"

	// owner: @pohly
	// alpha: v1.14
	// beta: v1.16
	//
	// Enables CSI Inline volumes support for pods
	CSIInlineVolume featuregate.Feature = "CSIInlineVolume"

	// owner: @pohly
	// alpha: v1.19
	//
	// Enables tracking of available storage capacity that CSI drivers provide.
	CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity"

	// owner: @alculquicondor
	// beta: v1.20
	//
	// Enables the use of PodTopologySpread scheduling plugin to do default
	// spreading and disables legacy SelectorSpread plugin.
	DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"

	// owner: @pohly
	// alpha: v1.19
	//
	// Enables generic ephemeral inline volume support for pods
	GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"

	// owner: @chendave
	// alpha: v1.21
	//
	// PreferNominatedNode tells scheduler whether the nominated node will be checked first before looping
	// all the rest of nodes in the cluster.
	// Enabling this feature also implies the preemptor pod might not be dispatched to the best candidate in
	// some corner case, e.g. another node releases enough resources after the nominated node has been set
	// and hence is the best candidate instead.
	PreferNominatedNode featuregate.Feature = "PreferNominatedNode"

	// owner: @tallclair
	// alpha: v1.12
	// beta: v1.14
	// GA: v1.20
	//
	// Enables RuntimeClass, for selecting between multiple runtimes to run a pod.
	RuntimeClass featuregate.Feature = "RuntimeClass"

	// owner: @mtaufen
	// alpha: v1.12
	// beta: v1.14
	// GA: v1.17
	//
	// Kubelet uses the new Lease API to report node heartbeats,
	// (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal.
	NodeLease featuregate.Feature = "NodeLease"

	// owner: @janosi
	// alpha: v1.12
	// beta: v1.18
	// GA: v1.20
	//
	// Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition
	SCTPSupport featuregate.Feature = "SCTPSupport"

	// owner: @xing-yang
	// alpha: v1.12
	// beta: v1.17
	// GA: v1.20
	//
	// Enable volume snapshot data source support.
	VolumeSnapshotDataSource featuregate.Feature = "VolumeSnapshotDataSource"

	// owner: @jessfraz
	// alpha: v1.12
	//
	// Enables control over ProcMountType for containers.
	ProcMountType featuregate.Feature = "ProcMountType"

	// owner: @janetkuo
	// alpha: v1.12
	//
	// Allow TTL controller to clean up Pods and Jobs after they finish.
	TTLAfterFinished featuregate.Feature = "TTLAfterFinished"

	// owner: @dashpole
	// alpha: v1.13
	// beta: v1.15
	//
	// Enables the kubelet's pod resources grpc endpoint
	KubeletPodResources featuregate.Feature = "KubeletPodResources"

	// owner: @davidz627
	// alpha: v1.14
	// beta: v1.17
	//
	// Enables the in-tree storage to CSI Plugin migration feature.
	CSIMigration featuregate.Feature = "CSIMigration"

	// owner: @davidz627
	// alpha: v1.14
	// beta: v1.17
	//
	// Enables the GCE PD in-tree driver to GCE CSI Driver migration feature.
	CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE"

	// owner: @davidz627
	// alpha: v1.17
	//
	// Disables the GCE PD in-tree driver.
	// Expects GCE PD CSI Driver to be installed and configured on all nodes.
	CSIMigrationGCEComplete featuregate.Feature = "CSIMigrationGCEComplete"

	// owner: @leakingtapan
	// alpha: v1.14
	// beta: v1.17
	//
	// Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature.
	CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS"

	// owner: @leakingtapan
	// alpha: v1.17
	//
	// Disables the AWS EBS in-tree driver.
	// Expects AWS EBS CSI Driver to be installed and configured on all nodes.
	CSIMigrationAWSComplete featuregate.Feature = "CSIMigrationAWSComplete"

	// owner: @andyzhangx
	// alpha: v1.15
	// beta: v1.19
	//
	// Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature.
	CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk"

	// owner: @andyzhangx
	// alpha: v1.17
	//
	// Disables the Azure Disk in-tree driver.
	// Expects Azure Disk CSI Driver to be installed and configured on all nodes.
	CSIMigrationAzureDiskComplete featuregate.Feature = "CSIMigrationAzureDiskComplete"

	// owner: @andyzhangx
	// alpha: v1.15
	//
	// Enables the Azure File in-tree driver to Azure File Driver migration feature.
	CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile"

	// owner: @andyzhangx
	// alpha: v1.17
	//
	// Disables the Azure File in-tree driver.
	// Expects Azure File CSI Driver to be installed and configured on all nodes.
	CSIMigrationAzureFileComplete featuregate.Feature = "CSIMigrationAzureFileComplete"

	// owner: @divyenpatel
	// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
	//
	// Enables the vSphere in-tree driver to vSphere CSI Driver migration feature.
	CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere"

	// owner: @divyenpatel
	// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15)
	//
	// Disables the vSphere in-tree driver.
	// Expects vSphere CSI Driver to be installed and configured on all nodes.
	CSIMigrationvSphereComplete featuregate.Feature = "CSIMigrationvSphereComplete"

	// owner: @huffmanca
	// alpha: v1.19
	// beta: v1.20
	//
	// Determines if a CSI Driver supports applying fsGroup.
	CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy"

	// owner: @gnufied
	// alpha: v1.18
	// beta: v1.20
	// Allows user to configure volume permission change policy for fsGroups when mounting
	// a volume in a Pod.
	ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"

	// owner: @RobertKrawitz, @derekwaynecarr
	// beta: v1.15
	// GA: v1.20
	//
	// Implement support for limiting pids in nodes
	SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"

	// owner: @wk8
	// alpha: v1.14
	// beta: v1.16
	//
	// Enables GMSA support for Windows workloads.
	WindowsGMSA featuregate.Feature = "WindowsGMSA"

	// owner: @bclau
	// alpha: v1.16
	// beta: v1.17
	// GA: v1.18
	//
	// Enables support for running container entrypoints as different usernames than their default ones.
	WindowsRunAsUserName featuregate.Feature = "WindowsRunAsUserName"

	// owner: @adisky
	// alpha: v1.14
	// beta: v1.18
	//
	// Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature.
	CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack"

	// owner: @adisky
	// alpha: v1.17
	//
	// Disables the OpenStack Cinder in-tree driver.
	// Expects the OpenStack Cinder CSI Driver to be installed and configured on all nodes.
	CSIMigrationOpenStackComplete featuregate.Feature = "CSIMigrationOpenStackComplete"

	// owner: @RobertKrawitz
	// alpha: v1.15
	//
	// Allow use of filesystems for ephemeral storage monitoring.
	// Only applies if LocalStorageCapacityIsolation is set.
	LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"

	// owner: @denkensk
	// alpha: v1.15
	// beta: v1.19
	//
	// Enables NonPreempting option for priorityClass and pod.
	NonPreemptingPriority featuregate.Feature = "NonPreemptingPriority"

	// owner: @egernst
	// alpha: v1.16
	// beta: v1.18
	//
	// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
	PodOverhead featuregate.Feature = "PodOverhead"

	// owner: @khenidak
	// alpha: v1.15
	//
	// Enables ipv6 dual stack
	IPv6DualStack featuregate.Feature = "IPv6DualStack"

	// owner: @robscott @freehan
	// alpha: v1.16
	//
	// Enable Endpoint Slices for more scalable Service endpoints.
	EndpointSlice featuregate.Feature = "EndpointSlice"

	// owner: @robscott @freehan
	// alpha: v1.18
	// beta: v1.19
	//
	// Enable Endpoint Slice consumption by kube-proxy for improved scalability.
	EndpointSliceProxying featuregate.Feature = "EndpointSliceProxying"

	// owner: @robscott @kumarvin123
	// alpha: v1.19
	//
	// Enable Endpoint Slice consumption by kube-proxy in Windows for improved scalability.
	WindowsEndpointSliceProxying featuregate.Feature = "WindowsEndpointSliceProxying"

	// owner: @matthyx
	// alpha: v1.16
	// beta: v1.18
	// GA: v1.20
	//
	// Enables the startupProbe in kubelet worker.
	StartupProbe featuregate.Feature = "StartupProbe"

	// owner: @deads2k
	// beta: v1.17
	//
	// Enables the users to skip TLS verification of kubelets on pod logs requests
	AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy"

	// owner: @mortent
	// alpha: v1.3
	// beta: v1.5
	//
	// Enable all logic related to the PodDisruptionBudget API object in policy
	PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget"

	// owner: @alaypatel07, @soltysh
	// alpha: v1.20
	// beta: v1.21
	//
	// CronJobControllerV2 controls whether the controller manager starts old cronjob
	// controller or new one which is implemented with informers and delaying queue
	//
	// This feature is deprecated, and will be removed in v1.22.
	CronJobControllerV2 featuregate.Feature = "CronJobControllerV2"

	// owner: @smarterclayton
	// alpha: v1.21
	//
	// DaemonSets allow workloads to maintain availability during update per node
	DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge"

	// owner: @m1093782566
	// alpha: v1.17
	//
	// Enables topology aware service routing
	ServiceTopology featuregate.Feature = "ServiceTopology"

	// owner: @robscott
	// alpha: v1.18
	// beta: v1.19
	// ga: v1.20
	//
	// Enables AppProtocol field for Services and Endpoints.
	ServiceAppProtocol featuregate.Feature = "ServiceAppProtocol"

	// owner: @wojtek-t
	// alpha: v1.18
	// beta: v1.19
	// ga: v1.21
	//
	// Enables a feature to make secrets and configmaps data immutable.
	ImmutableEphemeralVolumes featuregate.Feature = "ImmutableEphemeralVolumes"

	// owner: @bart0sh
	// alpha: v1.18
	// beta: v1.19
	//
	// Enables usage of HugePages-<size> in a volume medium,
	// e.g. emptyDir:
	//        medium: HugePages-1Gi
	HugePageStorageMediumSize featuregate.Feature = "HugePageStorageMediumSize"

	// owner: @derekwaynecarr
	// alpha: v1.20
	//
	// Enables usage of hugepages-<size> in downward API.
	DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"

	// owner: @freehan
	// GA: v1.18
	//
	// Enable ExternalTrafficPolicy for Service ExternalIPs.
	// This is for bug fix #69811
	ExternalPolicyForExternalIP featuregate.Feature = "ExternalPolicyForExternalIP"

	// owner: @bswartz
	// alpha: v1.18
	//
	// Enables usage of any object for volume data source in PVCs
	AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"

	// owner: @javidiaz
	// alpha: v1.19
	// beta: v1.20
	//
	// Allow setting the Fully Qualified Domain Name (FQDN) in the hostname of a Pod. If a Pod does not
	// have FQDN, this feature has no effect.
	SetHostnameAsFQDN featuregate.Feature = "SetHostnameAsFQDN"

	// owner: @ksubrmnn
	// alpha: v1.14
	// beta: v1.20
	//
	// Allows kube-proxy to run in Overlay mode for Windows
	WinOverlay featuregate.Feature = "WinOverlay"

	// owner: @ksubrmnn
	// alpha: v1.14
	//
	// Allows kube-proxy to create DSR loadbalancers for Windows
	WinDSR featuregate.Feature = "WinDSR"

	// owner: @RenaudWasTaken @dashpole
	// alpha: v1.19
	// beta: v1.20
	//
	// Disables Accelerator Metrics Collected by Kubelet
	DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics"

	// owner: @arjunrn @mwielgus @josephburnett
	// alpha: v1.20
	//
	// Add support for the HPA to scale based on metrics from individual containers
	// in target pods
	HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics"

	// owner: @zshihang
	// alpha: v1.13
	// beta: v1.20
	//
	// Allows kube-controller-manager to publish kube-root-ca.crt configmap to
	// every namespace. This feature is a prerequisite of BoundServiceAccountTokenVolume.
	RootCAConfigMap featuregate.Feature = "RootCAConfigMap"

	// owner: @andrewsykim
	// alpha: v1.20
	//
	// Enable Terminating condition in Endpoint Slices.
	EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"

	// owner: @robscott
	// alpha: v1.20
	//
	// Enable NodeName field on Endpoint Slices.
	EndpointSliceNodeName featuregate.Feature = "EndpointSliceNodeName"

	// owner: @derekwaynecarr
	// alpha: v1.20
	//
	// Enables kubelet support to size memory backed volumes
	SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"

	// owner: @andrewsykim @SergeyKanzhelev
	// GA: v1.20
	//
	// Ensure kubelet respects exec probe timeouts. Feature gate exists in-case existing workloads
	// may depend on old behavior where exec probe timeouts were ignored.
	// Lock to default in v1.21 and remove in v1.22.
	ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout"

	// owner: @andrewsykim
	// alpha: v1.20
	//
	// Enable kubelet exec plugins for image pull credentials.
	KubeletCredentialProviders featuregate.Feature = "KubeletCredentialProviders"
|
||||
|
||||
// owner: @zshihang
|
||||
// alpha: v1.20
|
||||
//
|
||||
// Enable kubelet to pass pod's service account token to NodePublishVolume
|
||||
// call of CSI driver which is mounting volumes for that pod.
|
||||
CSIServiceAccountToken featuregate.Feature = "CSIServiceAccountToken"
|
||||
|
||||
// owner: @bobbypage
|
||||
// alpha: v1.20
|
||||
// Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown.
|
||||
GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown"
|
||||
|
||||
// owner: @andrewsykim @uablrek
|
||||
// alpha: v1.20
|
||||
//
|
||||
// Allows control if NodePorts shall be created for services with "type: LoadBalancer" by defining the spec.AllocateLoadBalancerNodePorts field (bool)
|
||||
ServiceLBNodePortControl featuregate.Feature = "ServiceLBNodePortControl"
|
||||
|
||||
// owner: @janosi
|
||||
// alpha: v1.20
|
||||
//
|
||||
// Enables the usage of different protocols in the same Service with type=LoadBalancer
|
||||
MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService"
|
||||
)
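
// featureGateUsageExample is a minimal sketch, not part of the vendored file: a
// gate is registered with a default and then consulted through a FeatureGate
// before branching on the behavior it guards. The Default/PreRelease values
// here are illustrative only.
func featureGateUsageExample() (bool, error) {
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		PodOverhead: {Default: true, PreRelease: featuregate.Beta},
	}); err != nil {
		return false, err
	}
	// callers branch on gate.Enabled(...) rather than reading the constant
	return gate.Enabled(PodOverhead), nil
}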
188
kube/pkg/quota/v1/evaluator/core/persistent_volume_claims.go
Normal file
@@ -0,0 +1,188 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/admission"
	utilfeature "k8s.io/apiserver/pkg/util/feature"

	"kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper"
	k8sfeatures "kubesphere.io/kubesphere/kube/pkg/features"
	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
)

// the name used for object count quota
var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource())

// pvcResources are the set of static resources managed by quota associated with pvcs.
// for each resource in this list, it may be refined dynamically based on storage class.
var pvcResources = []corev1.ResourceName{
	corev1.ResourcePersistentVolumeClaims,
	corev1.ResourceRequestsStorage,
}

// storageClassSuffix is the suffix to the qualified portion of storage class resource name.
// For example, if you want to quota storage by storage class, you would have a declaration
// that follows <storage-class>.storageclass.storage.k8s.io/<resource>.
// For example:
// * gold.storageclass.storage.k8s.io/persistentvolumeclaims: 5
// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi
const storageClassSuffix string = ".storageclass.storage.k8s.io/"

/* TODO: prune?
// ResourceByStorageClass returns a quota resource name by storage class.
func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
	return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
*/

// V1ResourceByStorageClass returns a quota resource name by storage class.
func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
	return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
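
// resourceByStorageClassExample is a minimal sketch of the composed names; the
// "gold" class name is illustrative only.
func resourceByStorageClassExample() corev1.ResourceName {
	// yields "gold.storageclass.storage.k8s.io/requests.storage"
	return V1ResourceByStorageClass("gold", corev1.ResourceRequestsStorage)
}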

// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
	listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims"))
	pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace}
	return pvcEvaluator
}

// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
type pvcEvaluator struct {
	// listFuncByNamespace knows how to list pvc claims
	listFuncByNamespace generic.ListFuncByNamespace
}

// Constraints verifies that all required resources are present on the item.
func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
	// no-op for persistent volume claims
	return nil
}

// GroupResource that this evaluator tracks
func (p *pvcEvaluator) GroupResource() schema.GroupResource {
	return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()
}

// Handles returns true if the evaluator should handle the specified operation.
func (p *pvcEvaluator) Handles(a admission.Attributes) bool {
	op := a.GetOperation()
	if op == admission.Create {
		return true
	}
	if op == admission.Update && utilfeature.DefaultFeatureGate.Enabled(k8sfeatures.ExpandPersistentVolumes) {
		return true
	}
	return false
}

// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}

// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
	result := []corev1.ResourceName{}
	for _, item := range items {
		// match object count quota fields
		if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) {
			result = append(result, item)
			continue
		}
		// match pvc resources
		if quota.Contains(pvcResources, item) {
			result = append(result, item)
			continue
		}
		// match pvc resources scoped by storage class (<storage-class-name>.storageclass.storage.k8s.io/<resource>)
		for _, resource := range pvcResources {
			byStorageClass := storageClassSuffix + string(resource)
			if strings.HasSuffix(string(item), byStorageClass) {
				result = append(result, item)
				break
			}
		}
	}
	return result
}

// Usage knows how to measure usage associated with item.
func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
	result := corev1.ResourceList{}
	pvc, err := toExternalPersistentVolumeClaimOrError(item)
	if err != nil {
		return result, err
	}

	// charge for claim
	result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI))
	result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
	storageClassRef := helper.GetPersistentVolumeClaimClass(pvc)
	if len(storageClassRef) > 0 {
		storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims))
		result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI))
	}

	// charge for storage
	if request, found := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; found {
		result[corev1.ResourceRequestsStorage] = request
		// charge usage to the storage class (if present)
		if len(storageClassRef) > 0 {
			storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage))
			result[storageClassStorage] = request
		}
	}
	return result, nil
}
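
// pvcUsageExample is a minimal sketch of the charging rules above, using an
// illustrative "gold" class: the claim is charged once under the generic names
// and once under the class-scoped names.
func pvcUsageExample() (corev1.ResourceList, error) {
	gold := "gold"
	pvc := &corev1.PersistentVolumeClaim{
		Spec: corev1.PersistentVolumeClaimSpec{
			StorageClassName: &gold,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("10Gi")},
			},
		},
	}
	// persistentvolumeclaims=1 and requests.storage=10Gi, plus the
	// gold.storageclass.storage.k8s.io/ scoped equivalents.
	return (&pvcEvaluator{}).Usage(pvc)
}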

// UsageStats calculates aggregate usage for the object.
func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}

// ensure we implement required interface
var _ quota.Evaluator = &pvcEvaluator{}

func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) {
	pvc := &corev1.PersistentVolumeClaim{}
	switch t := obj.(type) {
	case *corev1.PersistentVolumeClaim:
		pvc = t
	default:
		return nil, fmt.Errorf("expect *v1.PersistentVolumeClaim, got %v", t)
	}
	return pvc, nil
}
397
kube/pkg/quota/v1/evaluator/core/pods.go
Normal file
@@ -0,0 +1,397 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"fmt"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/admission"

	"kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper"
	"kubesphere.io/kubesphere/kube/pkg/apis/core/v1/helper/qos"
	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
)

// the name used for object count quota
var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("pods").GroupResource())

// podResources are the set of resources managed by quota associated with pods.
var podResources = []corev1.ResourceName{
	podObjectCountName,
	corev1.ResourceCPU,
	corev1.ResourceMemory,
	corev1.ResourceEphemeralStorage,
	corev1.ResourceRequestsCPU,
	corev1.ResourceRequestsMemory,
	corev1.ResourceRequestsEphemeralStorage,
	corev1.ResourceLimitsCPU,
	corev1.ResourceLimitsMemory,
	corev1.ResourceLimitsEphemeralStorage,
	corev1.ResourcePods,
}

// podResourcePrefixes are the set of prefixes for resources (hugepages, and other
// potential extended resources with a specific prefix) managed by quota associated with pods.
var podResourcePrefixes = []string{
	corev1.ResourceHugePagesPrefix,
	corev1.ResourceRequestsHugePagesPrefix,
}

// requestedResourcePrefixes are the set of prefixes for resources
// that might be declared in pod's Resources.Requests/Limits
var requestedResourcePrefixes = []string{
	corev1.ResourceHugePagesPrefix,
}

// maskResourceWithPrefix masks a resource with a certain prefix,
// e.g. hugepages-XXX -> requests.hugepages-XXX
func maskResourceWithPrefix(resource corev1.ResourceName, prefix string) corev1.ResourceName {
	return corev1.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource)))
}
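
// maskResourceExample is a minimal sketch of the masking rule above.
func maskResourceExample() corev1.ResourceName {
	// "hugepages-2Mi" -> "requests.hugepages-2Mi"
	return maskResourceWithPrefix(corev1.ResourceHugePagesPrefix+"2Mi", corev1.DefaultResourceRequestsPrefix)
}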

// isExtendedResourceNameForQuota returns true if the extended resource name
// has the quota related resource prefix.
func isExtendedResourceNameForQuota(name corev1.ResourceName) bool {
	// As overcommit is not supported by extended resources for now,
	// only quota objects in the format "requests.resourceName" are allowed.
	return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), corev1.DefaultResourceRequestsPrefix)
}

// NOTE: it was a mistake, but if a quota tracks cpu or memory related resources,
// the incoming pod is required to have those values set. we should not repeat
// this mistake for other future resources (gpus, ephemeral-storage, etc.).
// do not add more resources to this list!
var validationSet = sets.NewString(
	string(corev1.ResourceCPU),
	string(corev1.ResourceMemory),
	string(corev1.ResourceRequestsCPU),
	string(corev1.ResourceRequestsMemory),
	string(corev1.ResourceLimitsCPU),
	string(corev1.ResourceLimitsMemory),
)

// NewPodEvaluator returns an evaluator that can evaluate pods
func NewPodEvaluator(f quota.ListerForResourceFunc, clock clock.Clock) quota.Evaluator {
	listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("pods"))
	podEvaluator := &podEvaluator{listFuncByNamespace: listFuncByNamespace, clock: clock}
	return podEvaluator
}

// podEvaluator knows how to measure usage of pods.
type podEvaluator struct {
	// knows how to list pods
	listFuncByNamespace generic.ListFuncByNamespace
	// used to track time
	clock clock.Clock
}

// Constraints verifies that all required resources are present on the pod.
// In addition, it validates that the resources are valid (i.e. requests < limits).
func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
	pod, err := toExternalPodOrError(item)
	if err != nil {
		return err
	}

	// BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container
	// must make an explicit request for the resource. this was a mistake. it coupled
	// validation with resource counting, but we did this before QoS was even defined.
	// let's not make that mistake again with other resources now that QoS is defined.
	requiredSet := quota.ToSet(required).Intersection(validationSet)
	missingSet := sets.NewString()
	for i := range pod.Spec.Containers {
		enforcePodContainerConstraints(&pod.Spec.Containers[i], requiredSet, missingSet)
	}
	for i := range pod.Spec.InitContainers {
		enforcePodContainerConstraints(&pod.Spec.InitContainers[i], requiredSet, missingSet)
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}

// GroupResource that this evaluator tracks
func (p *podEvaluator) GroupResource() schema.GroupResource {
	return corev1.SchemeGroupVersion.WithResource("pods").GroupResource()
}

// Handles returns true if the evaluator should handle the specified attributes.
func (p *podEvaluator) Handles(a admission.Attributes) bool {
	op := a.GetOperation()
	if op == admission.Create {
		return true
	}
	return false
}

// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *podEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
	return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc)
}

// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *podEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName {
	result := quota.Intersection(input, podResources)
	for _, resource := range input {
		// for resources with certain prefix, e.g. hugepages
		if quota.ContainsPrefix(podResourcePrefixes, resource) {
			result = append(result, resource)
		}
		// for extended resources
		if isExtendedResourceNameForQuota(resource) {
			result = append(result, resource)
		}
	}

	return result
}

// MatchingScopes takes the input specified list of scopes and pod object. Returns the set of scope selectors pod matches.
func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	matchedScopes := []corev1.ScopedResourceSelectorRequirement{}
	for _, selector := range scopeSelectors {
		match, err := podMatchesScopeFunc(selector, item)
		if err != nil {
			return []corev1.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err)
		}
		if match {
			matchedScopes = append(matchedScopes, selector)
		}
	}
	return matchedScopes, nil
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{}
	for _, selector := range limitedScopes {
		isCovered := false
		for _, matchedScopeSelector := range matchedQuotaScopes {
			if matchedScopeSelector.ScopeName == selector.ScopeName {
				isCovered = true
				break
			}
		}

		if !isCovered {
			uncoveredScopes = append(uncoveredScopes, selector)
		}
	}
	return uncoveredScopes, nil
}

// Usage knows how to measure usage associated with pods
func (p *podEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
	// delegate to normal usage
	return PodUsageFunc(item, p.clock)
}

// UsageStats calculates aggregate usage for the object.
func (p *podEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	return generic.CalculateUsageStats(options, p.listFuncByNamespace, podMatchesScopeFunc, p.Usage)
}

// verifies we implement the required interface.
var _ quota.Evaluator = &podEvaluator{}

// enforcePodContainerConstraints checks for required resources that are not set on this container and
// adds them to missingSet.
func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.String) {
	requests := container.Resources.Requests
	limits := container.Resources.Limits
	containerUsage := podComputeUsageHelper(requests, limits)
	containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
	if !containerSet.Equal(requiredSet) {
		difference := requiredSet.Difference(containerSet)
		missingSet.Insert(difference.List()...)
	}
}

// podComputeUsageHelper can summarize the pod compute quota usage based on requests and limits
func podComputeUsageHelper(requests corev1.ResourceList, limits corev1.ResourceList) corev1.ResourceList {
	result := corev1.ResourceList{}
	result[corev1.ResourcePods] = resource.MustParse("1")
	if request, found := requests[corev1.ResourceCPU]; found {
		result[corev1.ResourceCPU] = request
		result[corev1.ResourceRequestsCPU] = request
	}
	if limit, found := limits[corev1.ResourceCPU]; found {
		result[corev1.ResourceLimitsCPU] = limit
	}
	if request, found := requests[corev1.ResourceMemory]; found {
		result[corev1.ResourceMemory] = request
		result[corev1.ResourceRequestsMemory] = request
	}
	if limit, found := limits[corev1.ResourceMemory]; found {
		result[corev1.ResourceLimitsMemory] = limit
	}
	if request, found := requests[corev1.ResourceEphemeralStorage]; found {
		result[corev1.ResourceEphemeralStorage] = request
		result[corev1.ResourceRequestsEphemeralStorage] = request
	}
	if limit, found := limits[corev1.ResourceEphemeralStorage]; found {
		result[corev1.ResourceLimitsEphemeralStorage] = limit
	}
	for resource, request := range requests {
		// for resources with certain prefix, e.g. hugepages
		if quota.ContainsPrefix(requestedResourcePrefixes, resource) {
			result[resource] = request
			result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request
		}
		// for extended resources
		if helper.IsExtendedResourceName(resource) {
			// only quota objects in the format "requests.resourceName" are allowed for extended resources.
			result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request
		}
	}

	return result
}

func toExternalPodOrError(obj runtime.Object) (*corev1.Pod, error) {
	pod := &corev1.Pod{}
	switch t := obj.(type) {
	case *corev1.Pod:
		pod = t
	default:
		return nil, fmt.Errorf("expect *v1.Pod, got %v", t)
	}
	return pod, nil
}

// podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope
func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) {
	pod, err := toExternalPodOrError(object)
	if err != nil {
		return false, err
	}
	switch selector.ScopeName {
	case corev1.ResourceQuotaScopeTerminating:
		return isTerminating(pod), nil
	case corev1.ResourceQuotaScopeNotTerminating:
		return !isTerminating(pod), nil
	case corev1.ResourceQuotaScopeBestEffort:
		return isBestEffort(pod), nil
	case corev1.ResourceQuotaScopeNotBestEffort:
		return !isBestEffort(pod), nil
	case corev1.ResourceQuotaScopePriorityClass:
		return podMatchesSelector(pod, selector)
	}
	return false, nil
}

// PodUsageFunc returns the quota usage for a pod.
// A pod is charged for quota if the following are not true.
//  - pod has a terminal phase (failed or succeeded)
//  - pod has been marked for deletion and grace period has expired
func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, error) {
	pod, err := toExternalPodOrError(obj)
	if err != nil {
		return corev1.ResourceList{}, err
	}

	// always quota the object count (even if the pod is end of life)
	// object count quotas track all objects that are in storage.
	// where "pods" tracks all pods that have not reached a terminal state,
	// count/pods tracks all pods independent of state.
	result := corev1.ResourceList{
		podObjectCountName: *(resource.NewQuantity(1, resource.DecimalSI)),
	}

	// by convention, we do not quota compute resources that have reached end-of-life
	// note: the "pods" resource is considered a compute resource since it is tied to life-cycle.
	if !QuotaV1Pod(pod, clock) {
		return result, nil
	}

	requests := corev1.ResourceList{}
	limits := corev1.ResourceList{}
	// TODO: ideally, we have pod level requests and limits in the future.
	for i := range pod.Spec.Containers {
		requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests)
		limits = quota.Add(limits, pod.Spec.Containers[i].Resources.Limits)
	}
	// InitContainers are run sequentially before other containers start, so the highest
	// init container resource is compared against the sum of app containers to determine
	// the effective usage for both requests and limits.
	for i := range pod.Spec.InitContainers {
		requests = quota.Max(requests, pod.Spec.InitContainers[i].Resources.Requests)
		limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
	}

	result = quota.Add(result, podComputeUsageHelper(requests, limits))
	return result, nil
}
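
// podUsageExample is a minimal sketch of the init-container rule above: app
// containers are summed, init containers are folded in via quota.Max, so a
// 300m init container alongside two 100m app containers charges 300m of
// requests.cpu, not 500m.
func podUsageExample() (corev1.ResourceList, error) {
	cpu := func(q string) corev1.ResourceRequirements {
		return corev1.ResourceRequirements{
			Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse(q)},
		}
	}
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			InitContainers: []corev1.Container{{Name: "init", Resources: cpu("300m")}},
			Containers: []corev1.Container{
				{Name: "a", Resources: cpu("100m")},
				{Name: "b", Resources: cpu("100m")},
			},
		},
	}
	return PodUsageFunc(pod, clock.RealClock{})
}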

func isBestEffort(pod *corev1.Pod) bool {
	return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort
}

func isTerminating(pod *corev1.Pod) bool {
	if pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) {
		return true
	}
	return false
}

func podMatchesSelector(pod *corev1.Pod, selector corev1.ScopedResourceSelectorRequirement) (bool, error) {
	labelSelector, err := helper.ScopedResourceSelectorRequirementsAsSelector(selector)
	if err != nil {
		return false, fmt.Errorf("failed to parse and convert selector: %v", err)
	}
	var m map[string]string
	if len(pod.Spec.PriorityClassName) != 0 {
		m = map[string]string{string(corev1.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName}
	}
	if labelSelector.Matches(labels.Set(m)) {
		return true, nil
	}
	return false, nil
}

// QuotaV1Pod returns true if the pod is eligible to track against a quota,
// i.e. if it is not in a terminal state according to its phase.
func QuotaV1Pod(pod *corev1.Pod, clock clock.Clock) bool {
	// if pod is terminal, ignore it for quota
	if corev1.PodFailed == pod.Status.Phase || corev1.PodSucceeded == pod.Status.Phase {
		return false
	}
	// if pods are stuck terminating (for example, a node is lost), we do not want
	// to charge the user for that pod in quota because it could prevent them from
	// scaling up new pods to service their application.
	if pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil {
		now := clock.Now()
		deletionTime := pod.DeletionTimestamp.Time
		gracePeriod := time.Duration(*pod.DeletionGracePeriodSeconds) * time.Second
		if now.After(deletionTime.Add(gracePeriod)) {
			return false
		}
	}
	return true
}
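
// quotaV1PodExample is a minimal sketch of the eligibility rules above: a pod
// deleted at time t with a 30s grace period stops being charged once the clock
// passes t+30s, and failed/succeeded pods are never charged.
func quotaV1PodExample(pod *corev1.Pod) bool {
	return QuotaV1Pod(pod, clock.RealClock{})
}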
49
kube/pkg/quota/v1/evaluator/core/registry.go
Normal file
@@ -0,0 +1,49 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"

	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
)

// legacyObjectCountAliases maps the resources we historically used for simple
// object count quota to their legacy alias names.
var legacyObjectCountAliases = map[schema.GroupVersionResource]corev1.ResourceName{
	corev1.SchemeGroupVersion.WithResource("configmaps"):             corev1.ResourceConfigMaps,
	corev1.SchemeGroupVersion.WithResource("resourcequotas"):         corev1.ResourceQuotas,
	corev1.SchemeGroupVersion.WithResource("replicationcontrollers"): corev1.ResourceReplicationControllers,
	corev1.SchemeGroupVersion.WithResource("secrets"):                corev1.ResourceSecrets,
}

// NewEvaluators returns the list of static evaluators that manage more than counts
func NewEvaluators(f quota.ListerForResourceFunc) []quota.Evaluator {
	// these evaluators have special logic
	result := []quota.Evaluator{
		NewPodEvaluator(f, clock.RealClock{}),
		NewServiceEvaluator(f),
		NewPersistentVolumeClaimEvaluator(f),
	}
	// these evaluators require an alias for backwards compatibility
	for gvr, alias := range legacyObjectCountAliases {
		result = append(result,
			generic.NewObjectCountEvaluator(gvr.GroupResource(), generic.ListResourceUsingListerFunc(f, gvr), alias))
	}
	return result
}
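
// newQuotaConfigurationExample is a minimal wiring sketch: the evaluators built
// here are typically wrapped into a quota.Configuration, assuming the caller
// has derived a quota.ListerForResourceFunc from a shared informer factory
// (for example via generic.ListerFuncForResourceFunc).
func newQuotaConfigurationExample(f quota.ListerForResourceFunc) quota.Configuration {
	return generic.NewConfiguration(NewEvaluators(f), nil)
}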
149
kube/pkg/quota/v1/evaluator/core/services.go
Normal file
@@ -0,0 +1,149 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/admission"

	"kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
)

// the name used for object count quota
var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("services").GroupResource())

// serviceResources are the set of resources managed by quota associated with services.
var serviceResources = []corev1.ResourceName{
	serviceObjectCountName,
	corev1.ResourceServices,
	corev1.ResourceServicesNodePorts,
	corev1.ResourceServicesLoadBalancers,
}

// NewServiceEvaluator returns an evaluator that can evaluate services.
func NewServiceEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
	listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("services"))
	serviceEvaluator := &serviceEvaluator{listFuncByNamespace: listFuncByNamespace}
	return serviceEvaluator
}

// serviceEvaluator knows how to measure usage for services.
type serviceEvaluator struct {
	// knows how to list items by namespace
	listFuncByNamespace generic.ListFuncByNamespace
}

// Constraints verifies that all required resources are present on the item
func (p *serviceEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
	// this is a no-op for services
	return nil
}

// GroupResource that this evaluator tracks
func (p *serviceEvaluator) GroupResource() schema.GroupResource {
	return corev1.SchemeGroupVersion.WithResource("services").GroupResource()
}

// Handles returns true if the evaluator should handle the specified operation.
func (p *serviceEvaluator) Handles(a admission.Attributes) bool {
	operation := a.GetOperation()
	// We handle create and update because a service type can change.
	return admission.Create == operation || admission.Update == operation
}

// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *serviceEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}

// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *serviceEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName {
	return quota.Intersection(input, serviceResources)
}

// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// convert the input object to an external service object or error.
func toExternalServiceOrError(obj runtime.Object) (*corev1.Service, error) {
	svc := &corev1.Service{}
	switch t := obj.(type) {
	case *corev1.Service:
		svc = t
	default:
		return nil, fmt.Errorf("expect *v1.Service, got %v", t)
	}
	return svc, nil
}

// Usage knows how to measure usage associated with services
func (p *serviceEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
	result := corev1.ResourceList{}
	svc, err := toExternalServiceOrError(item)
	if err != nil {
		return result, err
	}
	ports := len(svc.Spec.Ports)
	// default service usage
	result[serviceObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
	result[corev1.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI))
	result[corev1.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI}
	result[corev1.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI}
	switch svc.Spec.Type {
	case corev1.ServiceTypeNodePort:
		// node port services need to count node ports
		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
		result[corev1.ResourceServicesNodePorts] = *value
	case corev1.ServiceTypeLoadBalancer:
		// load balancer services need to count node ports and load balancers
		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
		result[corev1.ResourceServicesNodePorts] = *value
		result[corev1.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI))
	}
	return result, nil
}
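
// serviceUsageExample is a minimal sketch of the switch above: a NodePort
// Service with two ports is charged services=1 and services.nodeports=2.
func serviceUsageExample() (corev1.ResourceList, error) {
	svc := &corev1.Service{
		Spec: corev1.ServiceSpec{
			Type:  corev1.ServiceTypeNodePort,
			Ports: []corev1.ServicePort{{Port: 80}, {Port: 443}},
		},
	}
	return (&serviceEvaluator{}).Usage(svc)
}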

// UsageStats calculates aggregate usage for the object.
func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}

var _ quota.Evaluator = &serviceEvaluator{}

// GetQuotaServiceType returns the ServiceType if the service type is eligible to track against a quota, or "" otherwise.
func GetQuotaServiceType(service *corev1.Service) corev1.ServiceType {
	switch service.Spec.Type {
	case corev1.ServiceTypeNodePort:
		return corev1.ServiceTypeNodePort
	case corev1.ServiceTypeLoadBalancer:
		return corev1.ServiceTypeLoadBalancer
	}
	return corev1.ServiceType("")
}
44
kube/pkg/quota/v1/generic/configuration.go
Normal file
@@ -0,0 +1,44 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generic

import (
	"k8s.io/apimachinery/pkg/runtime/schema"

	"kubesphere.io/kubesphere/kube/pkg/quota/v1"
)

// implements a basic configuration
type simpleConfiguration struct {
	evaluators       []quota.Evaluator
	ignoredResources map[schema.GroupResource]struct{}
}

// NewConfiguration creates a quota configuration
func NewConfiguration(evaluators []quota.Evaluator, ignoredResources map[schema.GroupResource]struct{}) quota.Configuration {
	return &simpleConfiguration{
		evaluators:       evaluators,
		ignoredResources: ignoredResources,
	}
}

func (c *simpleConfiguration) IgnoredResources() map[schema.GroupResource]struct{} {
	return c.ignoredResources
}

func (c *simpleConfiguration) Evaluators() []quota.Evaluator {
	return c.evaluators
}
319
kube/pkg/quota/v1/generic/evaluator.go
Normal file
@@ -0,0 +1,319 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generic

import (
	"fmt"
	"sync/atomic"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"

	quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
)

// InformerForResourceFunc knows how to provision an informer
type InformerForResourceFunc func(schema.GroupVersionResource) (informers.GenericInformer, error)

// ListerFuncForResourceFunc knows how to provision a lister from an informer func.
// The lister returns errors until the informer has synced.
func ListerFuncForResourceFunc(f InformerForResourceFunc) quota.ListerForResourceFunc {
	return func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
		informer, err := f(gvr)
		if err != nil {
			return nil, err
		}
		return &protectedLister{
			hasSynced:   cachedHasSynced(informer.Informer().HasSynced),
			notReadyErr: fmt.Errorf("%v not yet synced", gvr),
			delegate:    informer.Lister(),
		}, nil
	}
}

// cachedHasSynced returns a function that calls hasSynced() until it returns true once, then returns true
func cachedHasSynced(hasSynced func() bool) func() bool {
	cache := &atomic.Value{}
	cache.Store(false)
	return func() bool {
		if cache.Load().(bool) {
			// short-circuit if already synced
			return true
		}
		if hasSynced() {
			// remember we synced
			cache.Store(true)
			return true
		}
		return false
	}
}

// protectedLister returns notReadyErr if hasSynced returns false, otherwise delegates to delegate
type protectedLister struct {
	hasSynced   func() bool
	notReadyErr error
	delegate    cache.GenericLister
}

func (p *protectedLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	if !p.hasSynced() {
		return nil, p.notReadyErr
	}
	return p.delegate.List(selector)
}

func (p *protectedLister) Get(name string) (runtime.Object, error) {
	if !p.hasSynced() {
		return nil, p.notReadyErr
	}
	return p.delegate.Get(name)
}

func (p *protectedLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
	return &protectedNamespaceLister{p.hasSynced, p.notReadyErr, p.delegate.ByNamespace(namespace)}
}

// protectedNamespaceLister returns notReadyErr if hasSynced returns false, otherwise delegates to delegate
type protectedNamespaceLister struct {
	hasSynced   func() bool
	notReadyErr error
	delegate    cache.GenericNamespaceLister
}

func (p *protectedNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	if !p.hasSynced() {
		return nil, p.notReadyErr
	}
	return p.delegate.List(selector)
}

func (p *protectedNamespaceLister) Get(name string) (runtime.Object, error) {
	if !p.hasSynced() {
		return nil, p.notReadyErr
	}
	return p.delegate.Get(name)
}

// ListResourceUsingListerFunc returns a listing function based on the shared informer factory for the specified resource.
func ListResourceUsingListerFunc(l quota.ListerForResourceFunc, resource schema.GroupVersionResource) ListFuncByNamespace {
	return func(namespace string) ([]runtime.Object, error) {
		lister, err := l(resource)
		if err != nil {
			return nil, err
		}
		return lister.ByNamespace(namespace).List(labels.Everything())
	}
}

// ObjectCountQuotaResourceNameFor returns the object count quota name for the specified groupResource
func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) corev1.ResourceName {
	if len(groupResource.Group) == 0 {
		return corev1.ResourceName("count/" + groupResource.Resource)
	}
	return corev1.ResourceName("count/" + groupResource.Resource + "." + groupResource.Group)
}
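
// objectCountNameExample is a minimal sketch of the naming rule above: core
// resources carry no group suffix ("count/pods"), grouped resources append it.
func objectCountNameExample() corev1.ResourceName {
	// yields "count/deployments.apps"
	return ObjectCountQuotaResourceNameFor(schema.GroupResource{Group: "apps", Resource: "deployments"})
}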

// ListFuncByNamespace knows how to list resources in a namespace
type ListFuncByNamespace func(namespace string) ([]runtime.Object, error)

// MatchesScopeFunc knows how to evaluate if an object matches a scope
type MatchesScopeFunc func(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error)

// UsageFunc knows how to measure usage associated with an object
type UsageFunc func(object runtime.Object) (corev1.ResourceList, error)

// MatchingResourceNamesFunc is a function that returns the list of resources matched
type MatchingResourceNamesFunc func(input []corev1.ResourceName) []corev1.ResourceName

// MatchesNoScopeFunc returns false on all match checks
func MatchesNoScopeFunc(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) {
	return false, nil
}

// Matches returns true if the quota matches the specified item.
func Matches(
	resourceQuota *corev1.ResourceQuota, item runtime.Object,
	matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) {
	if resourceQuota == nil {
		return false, fmt.Errorf("expected non-nil quota")
	}
	// verify the quota matches on at least one resource
	matchResource := len(matchFunc(quota.ResourceNames(resourceQuota.Status.Hard))) > 0
	// by default, no scopes matches all
	matchScope := true
	for _, scope := range getScopeSelectorsFromQuota(resourceQuota) {
		innerMatch, err := scopeFunc(scope, item)
		if err != nil {
			return false, err
		}
		matchScope = matchScope && innerMatch
	}
	return matchResource && matchScope, nil
}

func getScopeSelectorsFromQuota(quota *corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement {
	selectors := []corev1.ScopedResourceSelectorRequirement{}
	for _, scope := range quota.Spec.Scopes {
		selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{
			ScopeName: scope,
			Operator:  corev1.ScopeSelectorOpExists})
	}
	if quota.Spec.ScopeSelector != nil {
		selectors = append(selectors, quota.Spec.ScopeSelector.MatchExpressions...)
	}
	return selectors
}

// CalculateUsageStats is a utility function that knows how to calculate aggregate usage.
func CalculateUsageStats(options quota.UsageStatsOptions,
	listFunc ListFuncByNamespace,
	scopeFunc MatchesScopeFunc,
	usageFunc UsageFunc) (quota.UsageStats, error) {
	// default each tracked resource to zero
	result := quota.UsageStats{Used: corev1.ResourceList{}}
	for _, resourceName := range options.Resources {
		result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI}
	}
	items, err := listFunc(options.Namespace)
	if err != nil {
		return result, fmt.Errorf("failed to list content: %v", err)
	}
	for _, item := range items {
		// need to verify that the item matches the set of scopes
		matchesScopes := true
		for _, scope := range options.Scopes {
			innerMatch, err := scopeFunc(corev1.ScopedResourceSelectorRequirement{ScopeName: scope}, item)
			if err != nil {
				return result, nil
			}
			if !innerMatch {
				matchesScopes = false
			}
		}
		if options.ScopeSelector != nil {
			for _, selector := range options.ScopeSelector.MatchExpressions {
				innerMatch, err := scopeFunc(selector, item)
				if err != nil {
					return result, nil
				}
				matchesScopes = matchesScopes && innerMatch
			}
		}
		// only count usage if there was a match
		if matchesScopes {
			usage, err := usageFunc(item)
			if err != nil {
				return result, err
			}
			result.Used = quota.Add(result.Used, usage)
		}
	}
	return result, nil
}

// objectCountEvaluator provides an implementation for quota.Evaluator
// that associates usage of the specified resource based on the number of items
// returned by the specified listing function.
type objectCountEvaluator struct {
	// GroupResource that this evaluator tracks.
	// It is used to construct a generic object count quota name.
	groupResource schema.GroupResource
	// A function that knows how to list resources by namespace.
	// TODO move to dynamic client in future
	listFuncByNamespace ListFuncByNamespace
	// Names associated with this resource in the quota for generic counting.
	resourceNames []corev1.ResourceName
}

// Constraints returns an error if the configured resource name is not in the required set.
func (o *objectCountEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
	// no-op for object counting
	return nil
}

// Handles returns true if the object count evaluator needs to track these attributes.
func (o *objectCountEvaluator) Handles(a admission.Attributes) bool {
	operation := a.GetOperation()
	return operation == admission.Create
}

// Matches returns true if the evaluator matches the specified quota with the provided input item
func (o *objectCountEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
	return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc)
}

// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (o *objectCountEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName {
	return quota.Intersection(input, o.resourceNames)
}

// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// Usage returns the resource usage for the specified object
func (o *objectCountEvaluator) Usage(object runtime.Object) (corev1.ResourceList, error) {
	quantity := resource.NewQuantity(1, resource.DecimalSI)
	resourceList := corev1.ResourceList{}
	for _, resourceName := range o.resourceNames {
		resourceList[resourceName] = *quantity
	}
	return resourceList, nil
}

// GroupResource tracked by this evaluator
func (o *objectCountEvaluator) GroupResource() schema.GroupResource {
	return o.groupResource
}

// UsageStats calculates aggregate usage for the object.
func (o *objectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	return CalculateUsageStats(options, o.listFuncByNamespace, MatchesNoScopeFunc, o.Usage)
}

// Verify implementation of interface at compile time.
var _ quota.Evaluator = &objectCountEvaluator{}

// NewObjectCountEvaluator returns an evaluator that can perform generic
// object quota counting. It allows an optional alias for backwards compatibility
// purposes for the legacy object counting names in quota. Unless it's supporting
// backwards compatibility, the alias should not be used.
func NewObjectCountEvaluator(
	groupResource schema.GroupResource, listFuncByNamespace ListFuncByNamespace,
	alias corev1.ResourceName) quota.Evaluator {

	resourceNames := []corev1.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)}
	if len(alias) > 0 {
		resourceNames = append(resourceNames, alias)
	}

	return &objectCountEvaluator{
		groupResource:       groupResource,
		listFuncByNamespace: listFuncByNamespace,
		resourceNames:       resourceNames,
	}
}
|
||||
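
// Example (illustrative sketch, not part of this diff): the admission
// controller later in this change constructs exactly this kind of evaluator
// on the fly via generic.NewObjectCountEvaluator(gr, nil, ""). With a legacy
// alias, usage is reported under both the count/* name and the alias. The
// function name below is hypothetical.
func exampleConfigMapEvaluator() quota.Evaluator {
    return NewObjectCountEvaluator(
        schema.GroupResource{Resource: "configmaps"},
        nil, // aggregate UsageStats is not needed for admission-time counting
        corev1.ResourceName("configmaps"), // legacy alias
    )
}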
81
kube/pkg/quota/v1/generic/registry.go
Normal file
@@ -0,0 +1,81 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generic

import (
    "sync"

    "k8s.io/apimachinery/pkg/runtime/schema"
    quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
)

// simpleRegistry implements a basic registry.
type simpleRegistry struct {
    lock sync.RWMutex
    // evaluators tracked by the registry
    evaluators map[schema.GroupResource]quota.Evaluator
}

// NewRegistry creates a simple registry with an initial list of evaluators.
func NewRegistry(evaluators []quota.Evaluator) quota.Registry {
    return &simpleRegistry{
        evaluators: evaluatorsByGroupResource(evaluators),
    }
}

func (r *simpleRegistry) Add(e quota.Evaluator) {
    r.lock.Lock()
    defer r.lock.Unlock()
    r.evaluators[e.GroupResource()] = e
}

func (r *simpleRegistry) Remove(e quota.Evaluator) {
    r.lock.Lock()
    defer r.lock.Unlock()
    delete(r.evaluators, e.GroupResource())
}

func (r *simpleRegistry) Get(gr schema.GroupResource) quota.Evaluator {
    r.lock.RLock()
    defer r.lock.RUnlock()
    return r.evaluators[gr]
}

func (r *simpleRegistry) List() []quota.Evaluator {
    r.lock.RLock()
    defer r.lock.RUnlock()

    return evaluatorsList(r.evaluators)
}

// evaluatorsByGroupResource converts a list of evaluators to a map keyed by group resource.
func evaluatorsByGroupResource(items []quota.Evaluator) map[schema.GroupResource]quota.Evaluator {
    result := map[schema.GroupResource]quota.Evaluator{}
    for _, item := range items {
        result[item.GroupResource()] = item
    }
    return result
}

// evaluatorsList converts a map of evaluators to a list.
func evaluatorsList(input map[schema.GroupResource]quota.Evaluator) []quota.Evaluator {
    var result []quota.Evaluator
    for _, item := range input {
        result = append(result, item)
    }
    return result
}
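
// Example (illustrative sketch, not part of this diff): the registry is safe
// for concurrent use, so evaluators can be registered lazily when an unknown
// group resource first shows up, mirroring what the admission controller in
// this change does. The helper name below is hypothetical.
func exampleLazyRegister(registry quota.Registry, gr schema.GroupResource) quota.Evaluator {
    if ev := registry.Get(gr); ev != nil {
        return ev
    }
    ev := NewObjectCountEvaluator(gr, nil, "")
    registry.Add(ev)
    return ev
}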
47
kube/pkg/quota/v1/install/registry.go
Normal file
@@ -0,0 +1,47 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package install

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
    core "kubesphere.io/kubesphere/kube/pkg/quota/v1/evaluator/core"
    generic "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
)

// NewQuotaConfigurationForAdmission returns a quota configuration for admission control.
func NewQuotaConfigurationForAdmission() quota.Configuration {
    evaluators := core.NewEvaluators(nil)
    return generic.NewConfiguration(evaluators, DefaultIgnoredResources())
}

// NewQuotaConfigurationForControllers returns a quota configuration for controllers.
func NewQuotaConfigurationForControllers(f quota.ListerForResourceFunc) quota.Configuration {
    evaluators := core.NewEvaluators(f)
    return generic.NewConfiguration(evaluators, DefaultIgnoredResources())
}

// ignoredResources are ignored by quota by default.
var ignoredResources = map[schema.GroupResource]struct{}{
    {Group: "", Resource: "events"}: {},
}

// DefaultIgnoredResources returns the default set of resources that the quota system
// should ignore. This is exposed so downstream integrators can have access to them.
func DefaultIgnoredResources() map[schema.GroupResource]struct{} {
    return ignoredResources
}
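
// Example (illustrative sketch, not part of this diff): consumers typically
// split the configuration into its two parts, an evaluator registry and the
// ignore set. The helper name below is hypothetical.
func exampleWireQuotaConfig() (quota.Registry, map[schema.GroupResource]struct{}) {
    cfg := NewQuotaConfigurationForAdmission()
    return generic.NewRegistry(cfg.Evaluators()), cfg.IgnoredResources()
}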
88
kube/pkg/quota/v1/interfaces.go
Normal file
@@ -0,0 +1,88 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/client-go/tools/cache"
)

// UsageStatsOptions is an options struct that describes how stats should be calculated.
type UsageStatsOptions struct {
    // Namespace where stats should be calculated
    Namespace string
    // Scopes that must match counted objects
    Scopes []corev1.ResourceQuotaScope
    // Resources are the set of resources to include in the measurement
    Resources     []corev1.ResourceName
    ScopeSelector *corev1.ScopeSelector
}

// UsageStats is the result of measuring observed resource use in the system.
type UsageStats struct {
    // Used maps resource to quantity used
    Used corev1.ResourceList
}

// Evaluator knows how to evaluate quota usage for a particular group resource.
type Evaluator interface {
    // Constraints ensures that each required resource is present on item
    Constraints(required []corev1.ResourceName, item runtime.Object) error
    // GroupResource returns the groupResource that this object knows how to evaluate
    GroupResource() schema.GroupResource
    // Handles determines if quota could be impacted by the specified attribute.
    // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota.
    Handles(operation admission.Attributes) bool
    // Matches returns true if the specified quota matches the input item
    Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error)
    // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches the input object.
    MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
    // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
    // It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
    UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
    // MatchingResources takes the input specified list of resources and returns the set of resources the evaluator matches.
    MatchingResources(input []corev1.ResourceName) []corev1.ResourceName
    // Usage returns the resource usage for the specified object
    Usage(item runtime.Object) (corev1.ResourceList, error)
    // UsageStats calculates latest observed usage stats for all objects
    UsageStats(options UsageStatsOptions) (UsageStats, error)
}

// Configuration defines how the quota system is configured.
type Configuration interface {
    // IgnoredResources are ignored by quota.
    IgnoredResources() map[schema.GroupResource]struct{}
    // Evaluators for quota evaluation.
    Evaluators() []Evaluator
}

// Registry maintains a list of evaluators.
type Registry interface {
    // Add to registry
    Add(e Evaluator)
    // Remove from registry
    Remove(e Evaluator)
    // Get by group resource
    Get(gr schema.GroupResource) Evaluator
    // List from registry
    List() []Evaluator
}

// ListerForResourceFunc knows how to get a lister for a specific resource.
type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error)
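
// Example (illustrative sketch, not part of this diff): a ListerForResourceFunc
// is usually adapted from a client-go shared informer factory. This assumes
// k8s.io/client-go/informers is additionally imported; the helper name is
// hypothetical.
func exampleListerFunc(f informers.SharedInformerFactory) ListerForResourceFunc {
    return func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
        informer, err := f.ForResource(gvr)
        if err != nil {
            return nil, err
        }
        return informer.Lister(), nil
    }
}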
293
kube/pkg/quota/v1/resources.go
Normal file
@@ -0,0 +1,293 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
    "sort"
    "strings"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/sets"
)

|
||||
func Equals(a corev1.ResourceList, b corev1.ResourceList) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for key, value1 := range a {
|
||||
value2, found := b[key]
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
if value1.Cmp(value2) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// LessThanOrEqual returns true if a < b for each key in b
|
||||
// If false, it returns the keys in a that exceeded b
|
||||
func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) {
|
||||
result := true
|
||||
resourceNames := []corev1.ResourceName{}
|
||||
for key, value := range b {
|
||||
if other, found := a[key]; found {
|
||||
if other.Cmp(value) > 0 {
|
||||
result = false
|
||||
resourceNames = append(resourceNames, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, resourceNames
|
||||
}
|
||||

// Max returns the result of Max(a, b) for each named resource.
func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
    result := corev1.ResourceList{}
    for key, value := range a {
        if other, found := b[key]; found {
            if value.Cmp(other) <= 0 {
                result[key] = other.DeepCopy()
                continue
            }
        }
        result[key] = value.DeepCopy()
    }
    for key, value := range b {
        if _, found := result[key]; !found {
            result[key] = value.DeepCopy()
        }
    }
    return result
}

// Add returns the result of a + b for each named resource.
func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
    result := corev1.ResourceList{}
    for key, value := range a {
        quantity := value.DeepCopy()
        if other, found := b[key]; found {
            quantity.Add(other)
        }
        result[key] = quantity
    }
    for key, value := range b {
        if _, found := result[key]; !found {
            result[key] = value.DeepCopy()
        }
    }
    return result
}

// SubtractWithNonNegativeResult subtracts and returns the result of a - b but
// makes sure we don't return negative values to prevent negative resource usage.
func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
    zero := resource.MustParse("0")

    result := corev1.ResourceList{}
    for key, value := range a {
        quantity := value.DeepCopy()
        if other, found := b[key]; found {
            quantity.Sub(other)
        }
        if quantity.Cmp(zero) > 0 {
            result[key] = quantity
        } else {
            result[key] = zero
        }
    }

    for key := range b {
        if _, found := result[key]; !found {
            result[key] = zero
        }
    }
    return result
}

// Subtract returns the result of a - b for each named resource.
func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList {
    result := corev1.ResourceList{}
    for key, value := range a {
        quantity := value.DeepCopy()
        if other, found := b[key]; found {
            quantity.Sub(other)
        }
        result[key] = quantity
    }
    for key, value := range b {
        if _, found := result[key]; !found {
            quantity := value.DeepCopy()
            quantity.Neg()
            result[key] = quantity
        }
    }
    return result
}
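
// Example (illustrative sketch, not part of this diff): the helpers above
// operate key by key, and keys present in only one operand are still carried
// into the result.
//
//    a := corev1.ResourceList{"cpu": resource.MustParse("2")}
//    b := corev1.ResourceList{"cpu": resource.MustParse("500m"), "memory": resource.MustParse("1Gi")}
//    Add(a, b)                           // cpu=2500m, memory=1Gi
//    SubtractWithNonNegativeResult(a, b) // cpu=1500m, memory=0 (clamped, never negative)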

// Mask returns a new resource list that only has the values with the specified names.
func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList {
    nameSet := ToSet(names)
    result := corev1.ResourceList{}
    for key, value := range resources {
        if nameSet.Has(string(key)) {
            result[key] = value.DeepCopy()
        }
    }
    return result
}

// ResourceNames returns a list of all resource names in the ResourceList.
func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName {
    result := []corev1.ResourceName{}
    for resourceName := range resources {
        result = append(result, resourceName)
    }
    return result
}

// Contains returns true if the specified item is in the list of items.
func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool {
    for _, i := range items {
        if i == item {
            return true
        }
    }
    return false
}

// ContainsPrefix returns true if the specified item has a prefix that is contained in the given prefix set.
func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool {
    for _, prefix := range prefixSet {
        if strings.HasPrefix(string(item), prefix) {
            return true
        }
    }
    return false
}

// Intersection returns the intersection of both lists of resources, deduped and sorted.
func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName {
    result := make([]corev1.ResourceName, 0, len(a))
    for _, item := range a {
        if Contains(result, item) {
            continue
        }
        if !Contains(b, item) {
            continue
        }
        result = append(result, item)
    }
    sort.Slice(result, func(i, j int) bool { return result[i] < result[j] })
    return result
}

// Difference returns the list of resources resulting from a-b, deduped and sorted.
func Difference(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName {
    result := make([]corev1.ResourceName, 0, len(a))
    for _, item := range a {
        if Contains(b, item) || Contains(result, item) {
            continue
        }
        result = append(result, item)
    }
    sort.Slice(result, func(i, j int) bool { return result[i] < result[j] })
    return result
}

// IsZero returns true if each key maps to the quantity value 0.
func IsZero(a corev1.ResourceList) bool {
    zero := resource.MustParse("0")
    for _, v := range a {
        if v.Cmp(zero) != 0 {
            return false
        }
    }
    return true
}

// IsNegative returns the set of resource names that have a negative value.
func IsNegative(a corev1.ResourceList) []corev1.ResourceName {
    results := []corev1.ResourceName{}
    zero := resource.MustParse("0")
    for k, v := range a {
        if v.Cmp(zero) < 0 {
            results = append(results, k)
        }
    }
    return results
}

// ToSet takes a list of resource names and converts it to a string set.
func ToSet(resourceNames []corev1.ResourceName) sets.String {
    result := sets.NewString()
    for _, resourceName := range resourceNames {
        result.Insert(string(resourceName))
    }
    return result
}

// CalculateUsage calculates and returns the requested ResourceList usage.
// If an error is returned, usage only contains the resources which encountered no calculation errors.
func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) {
    // find the intersection between the hard resources on the quota
    // and the resources this controller can track, so we know which
    // usage stats need to be measured
    hardResources := ResourceNames(hardLimits)
    potentialResources := []corev1.ResourceName{}
    evaluators := registry.List()
    for _, evaluator := range evaluators {
        potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...)
    }
    // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard
    matchedResources := Intersection(hardResources, potentialResources)

    errors := []error{}

    // sum the observed usage from each evaluator
    newUsage := corev1.ResourceList{}
    for _, evaluator := range evaluators {
        // only trigger the evaluator if it matches a resource in the quota, otherwise skip calculating anything
        intersection := evaluator.MatchingResources(matchedResources)
        if len(intersection) == 0 {
            continue
        }

        usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector}
        stats, err := evaluator.UsageStats(usageStatsOptions)
        if err != nil {
            // remember the error
            errors = append(errors, err)
            // exclude resources which encountered calculation errors
            matchedResources = Difference(matchedResources, intersection)
            continue
        }
        newUsage = Add(newUsage, stats.Used)
    }

    // mask the observed usage to only the set of resources tracked by this quota;
    // if the new usage differs from the last observed usage, the caller will need to issue an update
    newUsage = Mask(newUsage, matchedResources)
    return newUsage, utilerrors.NewAggregate(errors)
}
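
// Example (illustrative sketch, not part of this diff): a quota controller's
// resync loop would recompute status.used for one quota document roughly like
// this, assuming `rq` and `registry` are already in hand; the helper name is
// hypothetical.
func exampleSyncQuotaUsage(rq *corev1.ResourceQuota, registry Registry) error {
    used, err := CalculateUsage(rq.Namespace, rq.Spec.Scopes, rq.Status.Hard, registry, rq.Spec.ScopeSelector)
    if err != nil {
        return err
    }
    rq.Status.Used = used // the caller persists this via an UpdateStatus call
    return nil
}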
126
kube/plugin/pkg/admission/resourcequota/admission.go
Normal file
@@ -0,0 +1,126 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
    "context"
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apiserver/pkg/admission"
    genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "kubesphere.io/kubesphere/kube/pkg/quota/v1"
    "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
    resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

// QuotaAdmission implements an admission controller that can enforce quota constraints.
type QuotaAdmission struct {
    *admission.Handler
    config             *resourcequotaapi.Configuration
    stopCh             <-chan struct{}
    quotaConfiguration quota.Configuration
    numEvaluators      int
    quotaAccessor      *quotaAccessor
    evaluator          Evaluator
}

// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it.
type WantsQuotaConfiguration interface {
    SetQuotaConfiguration(quota.Configuration)
    admission.InitializationValidator
}

var _ admission.ValidationInterface = &QuotaAdmission{}
var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&QuotaAdmission{})
var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&QuotaAdmission{})
var _ = WantsQuotaConfiguration(&QuotaAdmission{})

type liveLookupEntry struct {
    expiry time.Time
    items  []*corev1.ResourceQuota
}

// NewResourceQuota configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must have the capability to handle group/kinds that
// are persisted by the server this admission controller is intercepting.
func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, stopCh <-chan struct{}) (*QuotaAdmission, error) {
    quotaAccessor, err := newQuotaAccessor()
    if err != nil {
        return nil, err
    }

    return &QuotaAdmission{
        Handler:       admission.NewHandler(admission.Create, admission.Update),
        stopCh:        stopCh,
        numEvaluators: numEvaluators,
        config:        config,
        quotaAccessor: quotaAccessor,
    }, nil
}

// SetExternalKubeClientSet registers the client into QuotaAdmission.
func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) {
    a.quotaAccessor.client = client
}

// SetExternalKubeInformerFactory registers an informer factory into QuotaAdmission.
func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
    a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister()
}

// SetQuotaConfiguration assigns and initializes the configuration and evaluator for QuotaAdmission.
func (a *QuotaAdmission) SetQuotaConfiguration(c quota.Configuration) {
    a.quotaConfiguration = c
    a.evaluator = NewQuotaEvaluator(a.quotaAccessor, a.quotaConfiguration.IgnoredResources(), generic.NewRegistry(a.quotaConfiguration.Evaluators()), nil, a.config, a.numEvaluators, a.stopCh)
}

// ValidateInitialization ensures all required fields are set.
func (a *QuotaAdmission) ValidateInitialization() error {
    if a.quotaAccessor == nil {
        return fmt.Errorf("missing quotaAccessor")
    }
    if a.quotaAccessor.client == nil {
        return fmt.Errorf("missing quotaAccessor.client")
    }
    if a.quotaAccessor.lister == nil {
        return fmt.Errorf("missing quotaAccessor.lister")
    }
    if a.quotaConfiguration == nil {
        return fmt.Errorf("missing quotaConfiguration")
    }
    if a.evaluator == nil {
        return fmt.Errorf("missing evaluator")
    }
    return nil
}

// Validate makes admission decisions while enforcing quota.
func (a *QuotaAdmission) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) (err error) {
    // ignore all operations that correspond to sub-resource actions
    if attr.GetSubresource() != "" {
        return nil
    }
    // ignore all operations that are not namespaced
    if attr.GetNamespace() == "" {
        return nil
    }
    return a.evaluator.Evaluate(attr)
}
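
// Example (illustrative sketch, not part of this diff): manual wiring of the
// plugin outside a generic admission chain, assuming `client`,
// `informerFactory`, `stopCh`, and the install package from this change are
// available.
//
//    plugin, err := NewResourceQuota(&resourcequotaapi.Configuration{}, 5, stopCh)
//    if err != nil {
//        return err
//    }
//    plugin.SetExternalKubeClientSet(client)
//    plugin.SetExternalKubeInformerFactory(informerFactory)
//    plugin.SetQuotaConfiguration(install.NewQuotaConfigurationForAdmission())
//    if err := plugin.ValidateInitialization(); err != nil {
//        return err
//    }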
@@ -0,0 +1,74 @@
/*
Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Configuration provides configuration for the ResourceQuota admission controller.
type Configuration struct {
    metav1.TypeMeta

    // LimitedResources whose consumption is limited by default.
    // +optional
    LimitedResources []LimitedResource
}

// LimitedResource matches a resource whose consumption is limited by default.
// To consume the resource, there must exist an associated quota that limits
// its consumption.
type LimitedResource struct {

    // APIGroup is the name of the APIGroup that contains the limited resource.
    // +optional
    APIGroup string `json:"apiGroup,omitempty"`

    // Resource is the name of the resource this rule applies to.
    // For example, if the administrator wants to limit consumption
    // of a storage resource associated with persistent volume claims,
    // the value would be "persistentvolumeclaims".
    Resource string `json:"resource"`

    // For each intercepted request, the quota system will evaluate
    // its resource usage. It will iterate through each resource consumed
    // and if the resource contains any substring in this listing, the
    // quota system will ensure that there is a covering quota. In the
    // absence of a covering quota, the quota system will deny the request.
    // For example, if an administrator wants to globally enforce that
    // a quota must exist to consume persistent volume claims associated
    // with any storage class, the list would include
    // ".storageclass.storage.k8s.io/requests.storage"
    MatchContains []string

    // For each intercepted request, the quota system will figure out if the input object
    // satisfies a scope which is present in this listing, then the
    // quota system will ensure that there is a covering quota. In the
    // absence of a covering quota, the quota system will deny the request.
    // For example, if an administrator wants to globally enforce that
    // a quota must exist to create a pod with the "cluster-services" priority class,
    // the list would include
    // "PriorityClassNameIn=cluster-services"
    // +optional
    MatchScopes []corev1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
}
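
// Example (illustrative sketch, not part of this diff): requiring a covering
// quota before any storage-class-scoped PVC storage can be consumed,
// mirroring the MatchContains example in the comments above.
//
//    cfg := &Configuration{
//        LimitedResources: []LimitedResource{{
//            Resource:      "persistentvolumeclaims",
//            MatchContains: []string{".storageclass.storage.k8s.io/requests.storage"},
//        }},
//    }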
717
kube/plugin/pkg/admission/resourcequota/controller.go
Normal file
@@ -0,0 +1,717 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
    "fmt"
    "sort"
    "strings"
    "sync"
    "time"

    "k8s.io/klog"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/client-go/util/workqueue"
    quota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
    "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
    resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

// Evaluator is used to see if quota constraints are satisfied.
type Evaluator interface {
    // Evaluate takes an operation and checks to see if quota constraints are satisfied. It returns an error if they are not.
    // The default implementation processes related operations in chunks when possible.
    Evaluate(a admission.Attributes) error
}

type quotaEvaluator struct {
    quotaAccessor QuotaAccessor
    // lockAcquisitionFunc acquires any required locks and returns a cleanup method to defer
    lockAcquisitionFunc func([]corev1.ResourceQuota) func()

    ignoredResources map[schema.GroupResource]struct{}

    // registry that knows how to measure usage for objects
    registry quota.Registry

    // TODO these are used together to bucket items by namespace and then batch them up for processing.
    // The technique is valuable for rollup activities to avoid fanout and reduce resource contention.
    // We could move this into a library if another component needed it.
    // queue is indexed by namespace, so that we bundle up on a per-namespace basis
    queue      *workqueue.Type
    workLock   sync.Mutex
    work       map[string][]*admissionWaiter
    dirtyWork  map[string][]*admissionWaiter
    inProgress sets.String

    // controls the run method so that we can cleanly conform to the Evaluator interface
    workers int
    stopCh  <-chan struct{}
    init    sync.Once

    // lets us know what resources are limited by default
    config *resourcequotaapi.Configuration
}

type admissionWaiter struct {
    attributes admission.Attributes
    finished   chan struct{}
    result     error
}

type defaultDeny struct{}

func (defaultDeny) Error() string {
    return "DEFAULT DENY"
}

// IsDefaultDeny returns true if the error is defaultDeny.
func IsDefaultDeny(err error) bool {
    if err == nil {
        return false
    }

    _, ok := err.(defaultDeny)
    return ok
}

func newAdmissionWaiter(a admission.Attributes) *admissionWaiter {
    return &admissionWaiter{
        attributes: a,
        finished:   make(chan struct{}),
        result:     defaultDeny{},
    }
}

// NewQuotaEvaluator configures an admission controller that can enforce quota constraints
// using the provided registry. The registry must have the capability to handle group/kinds that
// are persisted by the server this admission controller is intercepting.
func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]corev1.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator {
    // if we get a nil config, just create an empty default.
    if config == nil {
        config = &resourcequotaapi.Configuration{}
    }

    return &quotaEvaluator{
        quotaAccessor:       quotaAccessor,
        lockAcquisitionFunc: lockAcquisitionFunc,

        ignoredResources: ignoredResources,
        registry:         quotaRegistry,

        queue:      workqueue.NewNamed("admission_quota_controller"),
        work:       map[string][]*admissionWaiter{},
        dirtyWork:  map[string][]*admissionWaiter{},
        inProgress: sets.String{},

        workers: workers,
        stopCh:  stopCh,
        config:  config,
    }
}

// run begins watching and syncing.
func (e *quotaEvaluator) run() {
    defer utilruntime.HandleCrash()

    for i := 0; i < e.workers; i++ {
        go wait.Until(e.doWork, time.Second, e.stopCh)
    }
    <-e.stopCh
    klog.Infof("Shutting down quota evaluator")
    e.queue.ShutDown()
}

func (e *quotaEvaluator) doWork() {
    workFunc := func() bool {
        ns, admissionAttributes, quit := e.getWork()
        if quit {
            return true
        }
        defer e.completeWork(ns)
        if len(admissionAttributes) == 0 {
            return false
        }
        e.checkAttributes(ns, admissionAttributes)
        return false
    }
    for {
        if quit := workFunc(); quit {
            klog.Infof("quota evaluator worker shutdown")
            return
        }
    }
}

// checkAttributes iterates over and evaluates all the waiting admissionAttributes. It will always notify all waiters
// before returning. The default is to deny.
func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admissionWaiter) {
    // notify all on exit
    defer func() {
        for _, admissionAttribute := range admissionAttributes {
            close(admissionAttribute.finished)
        }
    }()

    quotas, err := e.quotaAccessor.GetQuotas(ns)
    if err != nil {
        for _, admissionAttribute := range admissionAttributes {
            admissionAttribute.result = err
        }
        return
    }
    // if limited resources are disabled, we can just return safely when there are no quotas.
    limitedResourcesDisabled := len(e.config.LimitedResources) == 0
    if len(quotas) == 0 && limitedResourcesDisabled {
        for _, admissionAttribute := range admissionAttributes {
            admissionAttribute.result = nil
        }
        return
    }

    if e.lockAcquisitionFunc != nil {
        releaseLocks := e.lockAcquisitionFunc(quotas)
        defer releaseLocks()
    }

    e.checkQuotas(quotas, admissionAttributes, 3)
}

// checkQuotas checks the admission attributes against the passed quotas. If a quota applies, it will attempt to update it
// AFTER it has checked all the admissionAttributes. The method breaks down into phases like this:
// 0. make a copy of the quotas to act as a "running" quota so we know what we need to update and can still compare against the
//    originals
// 1. check each admission attribute to see if it fits within *all* the quotas. If it doesn't fit, mark the waiter as failed
//    and the running quotas don't change. If it did fit, check to see if any quota was changed. If there was no quota change,
//    mark the waiter as succeeded. If some quota did change, update the running quotas.
// 2. If no running quota was changed, return now since no updates are needed.
// 3. for each quota that has changed, attempt an update. If all updates succeeded, update all unset waiters to success status and return. If some
//    updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version
//    and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota
//    documents for these waiters have already been evaluated. Step 1 will mark all the ones that should already have succeeded.
func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) {
    // yet another copy to compare against originals to see if we actually have deltas
    originalQuotas, err := copyQuotas(quotas)
    if err != nil {
        utilruntime.HandleError(err)
        return
    }

    atLeastOneChanged := false
    for i := range admissionAttributes {
        admissionAttribute := admissionAttributes[i]
        newQuotas, err := e.checkRequest(quotas, admissionAttribute.attributes)
        if err != nil {
            admissionAttribute.result = err
            continue
        }

        // Don't update quota for admissionAttributes that correspond to dry-run requests
        if admissionAttribute.attributes.IsDryRun() {
            admissionAttribute.result = nil
            continue
        }

        // if the new quotas are the same as the old quotas, then this particular one doesn't issue any updates;
        // that means that no quota docs applied, so it can get a pass
        atLeastOneChangeForThisWaiter := false
        for j := range newQuotas {
            if !quota.Equals(quotas[j].Status.Used, newQuotas[j].Status.Used) {
                atLeastOneChanged = true
                atLeastOneChangeForThisWaiter = true
                break
            }
        }

        if !atLeastOneChangeForThisWaiter {
            admissionAttribute.result = nil
        }

        quotas = newQuotas
    }

    // if none of the requests changed anything, there's no reason to issue an update, just return now
    if !atLeastOneChanged {
        return
    }

    // now go through and try to issue updates. Things get a little weird here:
    // 1. check to see if the quota changed. If not, skip.
    // 2. if the quota changed and the update passes, be happy
    // 3. if the quota changed and the update fails, add the original to a retry list
    var updatedFailedQuotas []corev1.ResourceQuota
    var lastErr error
    for i := range quotas {
        newQuota := quotas[i]

        // if this quota didn't have its status changed, skip it
        if quota.Equals(originalQuotas[i].Status.Used, newQuota.Status.Used) {
            continue
        }

        if err := e.quotaAccessor.UpdateQuotaStatus(&newQuota); err != nil {
            updatedFailedQuotas = append(updatedFailedQuotas, newQuota)
            lastErr = err
        }
    }

    if len(updatedFailedQuotas) == 0 {
        // all the updates succeeded. At this point, anything with the default deny error was just waiting to
        // get a successful update, so we can mark and notify
        for _, admissionAttribute := range admissionAttributes {
            if IsDefaultDeny(admissionAttribute.result) {
                admissionAttribute.result = nil
            }
        }
        return
    }

    // at this point, errors are fatal. Update all waiters without status to failed and return
    if remainingRetries <= 0 {
        for _, admissionAttribute := range admissionAttributes {
            if IsDefaultDeny(admissionAttribute.result) {
                admissionAttribute.result = lastErr
            }
        }
        return
    }

    // this retry logic has the same bug in that it's possible to be checking against quota in a state that never actually exists where
    // you've added a new document, then updated an old one, your resource matches both, and you're only checking one.
    // updates for these quota names failed. Get the current quotas in the namespace, compare by name, and check whether the
    // resource versions have changed. If not, we're going to fall through and fail everything. If they all have, then we can try again.
    newQuotas, err := e.quotaAccessor.GetQuotas(quotas[0].Namespace)
    if err != nil {
        // this means that updates failed. Anything with a default deny error has failed and we need to let them know
        for _, admissionAttribute := range admissionAttributes {
            if IsDefaultDeny(admissionAttribute.result) {
                admissionAttribute.result = lastErr
            }
        }
        return
    }

    // this logic goes through our cache to find the new version of all quotas that failed update. If something has been removed,
    // it is skipped on this retry. After all, you removed it.
    quotasToCheck := []corev1.ResourceQuota{}
    for _, newQuota := range newQuotas {
        for _, oldQuota := range updatedFailedQuotas {
            if newQuota.Name == oldQuota.Name {
                quotasToCheck = append(quotasToCheck, newQuota)
                break
            }
        }
    }
    e.checkQuotas(quotasToCheck, admissionAttributes, remainingRetries-1)
}

func copyQuotas(in []corev1.ResourceQuota) ([]corev1.ResourceQuota, error) {
    out := make([]corev1.ResourceQuota, 0, len(in))
    for _, quota := range in {
        out = append(out, *quota.DeepCopy())
    }

    return out, nil
}

// filterLimitedResourcesByGroupResource filters the input to the limited resources that match the specified groupResource.
func filterLimitedResourcesByGroupResource(input []resourcequotaapi.LimitedResource, groupResource schema.GroupResource) []resourcequotaapi.LimitedResource {
    result := []resourcequotaapi.LimitedResource{}
    for i := range input {
        limitedResource := input[i]
        limitedGroupResource := schema.GroupResource{Group: limitedResource.APIGroup, Resource: limitedResource.Resource}
        if limitedGroupResource == groupResource {
            result = append(result, limitedResource)
        }
    }
    return result
}

// limitedByDefault determines from the specified usage and limitedResources the set of resource names
// that must be present in a covering quota. It returns an empty set if none of the consumed
// resources are limited by default.
func limitedByDefault(usage corev1.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []corev1.ResourceName {
    result := []corev1.ResourceName{}
    for _, limitedResource := range limitedResources {
        for k, v := range usage {
            // if a resource is consumed, we need to check if it matches on the limited resource list.
            if v.Sign() == 1 {
                // if we get a match, we add it to the limited set
                for _, matchContain := range limitedResource.MatchContains {
                    if strings.Contains(string(k), matchContain) {
                        result = append(result, k)
                        break
                    }
                }
            }
        }
    }
    return result
}

func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]corev1.ScopedResourceSelectorRequirement, error) {
    scopes := []corev1.ScopedResourceSelectorRequirement{}
    for _, limitedResource := range limitedResources {
        matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes)
        if err != nil {
            klog.Errorf("Error while matching limited Scopes: %v", err)
            return []corev1.ScopedResourceSelectorRequirement{}, err
        }
        scopes = append(scopes, matched...)
    }
    return scopes, nil
}
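
// Example (illustrative sketch, not part of this diff): with the PVC
// configuration shown earlier, a claim consuming
// "gold.storageclass.storage.k8s.io/requests.storage" matches via
// MatchContains, so that resource name must be covered by a quota. This
// assumes the resource quantity package is imported.
//
//    usage := corev1.ResourceList{
//        "gold.storageclass.storage.k8s.io/requests.storage": resource.MustParse("8Gi"),
//    }
//    limitedByDefault(usage, cfg.LimitedResources)
//    // -> ["gold.storageclass.storage.k8s.io/requests.storage"]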

// checkRequest verifies that the request does not exceed any quota constraint. It returns a copy of quotas, not yet persisted,
// that captures what the usage would be if the request succeeded. It returns an error if there is insufficient quota to satisfy the request.
func (e *quotaEvaluator) checkRequest(quotas []corev1.ResourceQuota, a admission.Attributes) ([]corev1.ResourceQuota, error) {
    evaluator := e.registry.Get(a.GetResource().GroupResource())
    if evaluator == nil {
        return quotas, nil
    }
    return CheckRequest(quotas, a, evaluator, e.config.LimitedResources)
}

// CheckRequest is a static version of quotaEvaluator.checkRequest that can be called from outside the evaluator.
func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator,
    limited []resourcequotaapi.LimitedResource) ([]corev1.ResourceQuota, error) {
    if !evaluator.Handles(a) {
        return quotas, nil
    }

    // if we have limited resources enabled for this resource, always calculate usage
    inputObject := a.GetObject()

    // Check if object matches AdmissionConfiguration matchScopes
    limitedScopes, err := getMatchedLimitedScopes(evaluator, inputObject, limited)
    if err != nil {
        return quotas, nil
    }

    // determine the set of resource names that must exist in a covering quota
    limitedResourceNames := []corev1.ResourceName{}
    limitedResources := filterLimitedResourcesByGroupResource(limited, a.GetResource().GroupResource())
    if len(limitedResources) > 0 {
        deltaUsage, err := evaluator.Usage(inputObject)
        if err != nil {
            return quotas, err
        }
        limitedResourceNames = limitedByDefault(deltaUsage, limitedResources)
    }
    limitedResourceNamesSet := quota.ToSet(limitedResourceNames)

    // find the set of quotas that are pertinent to this request
    // reject if we match the quota, but usage is not calculated yet
    // reject if the input object does not satisfy quota constraints
    // if there are no pertinent quotas, we can just return
    interestingQuotaIndexes := []int{}
    // track the cumulative set of resources that were required across all quotas;
    // this is needed to know if we have satisfied any constraints where consumption
    // was limited by default.
    restrictedResourcesSet := sets.String{}
    restrictedScopes := []corev1.ScopedResourceSelectorRequirement{}
    for i := range quotas {
        resourceQuota := quotas[i]
        scopeSelectors := getScopeSelectorsFromQuota(resourceQuota)
        localRestrictedScopes, err := evaluator.MatchingScopes(inputObject, scopeSelectors)
        if err != nil {
            return nil, fmt.Errorf("error matching scopes of quota %s, err: %v", resourceQuota.Name, err)
        }
        restrictedScopes = append(restrictedScopes, localRestrictedScopes...)

        match, err := evaluator.Matches(&resourceQuota, inputObject)
        if err != nil {
            klog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err)
            return quotas, err
        }
        if !match {
            continue
        }

        hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
        restrictedResources := evaluator.MatchingResources(hardResources)
        if err := evaluator.Constraints(restrictedResources, inputObject); err != nil {
            return nil, admission.NewForbidden(a, fmt.Errorf("failed quota: %s: %v", resourceQuota.Name, err))
        }
        if !hasUsageStats(&resourceQuota, restrictedResources) {
            return nil, admission.NewForbidden(a, fmt.Errorf("status unknown for quota: %s, resources: %s", resourceQuota.Name, prettyPrintResourceNames(restrictedResources)))
        }
        interestingQuotaIndexes = append(interestingQuotaIndexes, i)
        localRestrictedResourcesSet := quota.ToSet(restrictedResources)
        restrictedResourcesSet.Insert(localRestrictedResourcesSet.List()...)
    }

    // Usage of some resources cannot be counted in isolation, for example when
    // the resource represents a number of unique references to an external
    // resource. In such a case an evaluator needs to process other objects in
    // the same namespace, which therefore needs to be known.
    namespace := a.GetNamespace()
    if accessor, err := meta.Accessor(inputObject); namespace != "" && err == nil {
        if accessor.GetNamespace() == "" {
            accessor.SetNamespace(namespace)
        }
    }
    // there is at least one quota that definitely matches our object;
    // as a result, we need to measure the usage of this object for quota.
    // on updates, we need to subtract the previous measured usage;
    // if usage shows no change, just return since it has no impact on quota
    deltaUsage, err := evaluator.Usage(inputObject)
    if err != nil {
        return quotas, err
    }

    // ensure that usage for the input object is never negative (this would mean a resource made a negative resource requirement)
    if negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 {
        return nil, admission.NewForbidden(a, fmt.Errorf("quota usage is negative for resource(s): %s", prettyPrintResourceNames(negativeUsage)))
    }

    if admission.Update == a.GetOperation() {
        prevItem := a.GetOldObject()
        if prevItem == nil {
            return nil, admission.NewForbidden(a, fmt.Errorf("unable to get previous usage since prior version of object was not found"))
        }

        // if we can definitively determine that this is not a case of "create on update",
        // then charge based on the delta. Otherwise, bill the maximum
        metadata, err := meta.Accessor(prevItem)
        if err == nil && len(metadata.GetResourceVersion()) > 0 {
            prevUsage, innerErr := evaluator.Usage(prevItem)
            if innerErr != nil {
                return quotas, innerErr
            }
            deltaUsage = quota.SubtractWithNonNegativeResult(deltaUsage, prevUsage)
        }
    }

    if quota.IsZero(deltaUsage) {
        return quotas, nil
    }

    // verify that for every resource that had limited-by-default consumption
    // enabled there was a corresponding quota that covered its use.
    // if not, we reject the request.
    hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet)
    if len(hasNoCoveringQuota) > 0 {
        return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ",")))
    }

    // verify that for every scope that had limited access enabled
    // there was a corresponding quota that covered it.
    // if not, we reject the request.
    scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)
    if err != nil {
        return quotas, err
    }
    if len(scopesHasNoCoveringQuota) > 0 {
        return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota)
    }

    if len(interestingQuotaIndexes) == 0 {
        return quotas, nil
    }

    outQuotas, err := copyQuotas(quotas)
    if err != nil {
        return nil, err
    }

    for _, index := range interestingQuotaIndexes {
        resourceQuota := outQuotas[index]

        hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
        requestedUsage := quota.Mask(deltaUsage, hardResources)
        newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
        maskedNewUsage := quota.Mask(newUsage, quota.ResourceNames(requestedUsage))

        if allowed, exceeded := quota.LessThanOrEqual(maskedNewUsage, resourceQuota.Status.Hard); !allowed {
            failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
            failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
            failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
            return nil, admission.NewForbidden(a,
                fmt.Errorf("exceeded quota: %s, requested: %s, used: %s, limited: %s",
                    resourceQuota.Name,
                    prettyPrint(failedRequestedUsage),
                    prettyPrint(failedUsed),
                    prettyPrint(failedHard)))
        }

        // update to the new usage number
        outQuotas[index].Status.Used = newUsage
    }

    return outQuotas, nil
}
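
// Example (illustrative sketch, not part of this diff): because CheckRequest
// is exported, the same enforcement can run outside this plugin, e.g. from a
// validating webhook, assuming `quotas`, `attr`, and `evaluator` are already
// in hand.
//
//    updated, err := CheckRequest(quotas, attr, evaluator, nil)
//    if err != nil {
//        return err // request would exceed, or lacks, a covering quota
//    }
//    _ = updated // deep copies carrying the would-be usage; not persisted here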

func getScopeSelectorsFromQuota(quota corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement {
    selectors := []corev1.ScopedResourceSelectorRequirement{}
    for _, scope := range quota.Spec.Scopes {
        selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{
            ScopeName: scope,
            Operator:  corev1.ScopeSelectorOpExists})
    }
    if quota.Spec.ScopeSelector != nil {
        selectors = append(selectors, quota.Spec.ScopeSelector.MatchExpressions...)
    }
    return selectors
}

func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
    e.init.Do(func() {
        go e.run()
    })

    // is this resource ignored?
    gvr := a.GetResource()
    gr := gvr.GroupResource()
    if _, ok := e.ignoredResources[gr]; ok {
        return nil
    }

    // if we do not know how to evaluate usage for this resource, create an evaluator
    evaluator := e.registry.Get(gr)
    if evaluator == nil {
        // create an object count evaluator if no evaluator was previously registered;
        // note, we do not need aggregate usage here, so we pass a nil informer func
        evaluator = generic.NewObjectCountEvaluator(gr, nil, "")
        e.registry.Add(evaluator)
        klog.Infof("quota admission added evaluator for: %s", gr)
    }
    // for this kind, check if the operation could mutate any quota resources;
    // if no resources tracked by quota are impacted, then just return
    if !evaluator.Handles(a) {
        return nil
    }
    waiter := newAdmissionWaiter(a)

    e.addWork(waiter)

    // wait for completion or timeout
    select {
    case <-waiter.finished:
    case <-time.After(10 * time.Second):
        return apierrors.NewInternalError(fmt.Errorf("resource quota evaluates timeout"))
    }

    return waiter.result
}

func (e *quotaEvaluator) addWork(a *admissionWaiter) {
    e.workLock.Lock()
    defer e.workLock.Unlock()

    ns := a.attributes.GetNamespace()
    // this Add can trigger a Get BEFORE the work is added to a list, but this is ok because the getWork routine
    // waits on the workLock before retrieving the work to do, so the writes in this method will be observed
    e.queue.Add(ns)

    if e.inProgress.Has(ns) {
        e.dirtyWork[ns] = append(e.dirtyWork[ns], a)
        return
    }

    e.work[ns] = append(e.work[ns], a)
}

func (e *quotaEvaluator) completeWork(ns string) {
    e.workLock.Lock()
    defer e.workLock.Unlock()

    e.queue.Done(ns)
    e.work[ns] = e.dirtyWork[ns]
    delete(e.dirtyWork, ns)
    e.inProgress.Delete(ns)
}
|
||||
// getWork returns a namespace, a list of work items in that
|
||||
// namespace, and a shutdown boolean. If not shutdown then the return
|
||||
// must eventually be followed by a call on completeWork for the
|
||||
// returned namespace (regardless of whether the work item list is
|
||||
// empty).
|
||||
func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) {
|
||||
uncastNS, shutdown := e.queue.Get()
|
||||
if shutdown {
|
||||
return "", []*admissionWaiter{}, shutdown
|
||||
}
|
||||
ns := uncastNS.(string)
|
||||
|
||||
e.workLock.Lock()
|
||||
defer e.workLock.Unlock()
|
||||
// at this point, we know we have a coherent view of e.work. It is entirely possible
|
||||
// that our workqueue has another item requeued to it, but we'll pick it up early. This ok
|
||||
// because the next time will go into our dirty list
|
||||
|
||||
work := e.work[ns]
|
||||
delete(e.work, ns)
|
||||
delete(e.dirtyWork, ns)
|
||||
e.inProgress.Insert(ns)
|
||||
return ns, work, false
|
||||
}
// prettyPrint formats a resource list for usage in errors
// it outputs resources sorted in increasing order
func prettyPrint(item corev1.ResourceList) string {
	parts := []string{}
	keys := []string{}
	for key := range item {
		keys = append(keys, string(key))
	}
	sort.Strings(keys)
	for _, key := range keys {
		value := item[corev1.ResourceName(key)]
		constraint := key + "=" + value.String()
		parts = append(parts, constraint)
	}
	return strings.Join(parts, ",")
}

func prettyPrintResourceNames(a []corev1.ResourceName) string {
	values := []string{}
	for _, value := range a {
		values = append(values, string(value))
	}
	sort.Strings(values)
	return strings.Join(values, ",")
}

// hasUsageStats returns true if for each hard constraint in interestingResources there is a value for its current usage
func hasUsageStats(resourceQuota *corev1.ResourceQuota, interestingResources []corev1.ResourceName) bool {
	interestingSet := quota.ToSet(interestingResources)
	for resourceName := range resourceQuota.Status.Hard {
		if !interestingSet.Has(string(resourceName)) {
			continue
		}
		if _, found := resourceQuota.Status.Used[resourceName]; !found {
			return false
		}
	}
	return true
}
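For reference, prettyPrint renders a ResourceList as sorted key=value pairs; a quick in-package test sketch (hypothetical, assuming it sits next to the code above):

package resourcequota

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func TestPrettyPrint(t *testing.T) {
	rl := corev1.ResourceList{
		corev1.ResourceMemory: resource.MustParse("1Gi"),
		corev1.ResourceCPU:    resource.MustParse("500m"),
	}
	// keys are sorted, so cpu comes before memory
	if got, want := prettyPrint(rl), "cpu=500m,memory=1Gi"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}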
155
kube/plugin/pkg/admission/resourcequota/resource_access.go
Normal file
@@ -0,0 +1,155 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/client-go/kubernetes"
	corev1listers "k8s.io/client-go/listers/core/v1"
)

// QuotaAccessor abstracts the get/set logic from the rest of the Evaluator. This could be a test stub, a straight passthrough,
// or most commonly a series of deconflicting caches.
type QuotaAccessor interface {
	// UpdateQuotaStatus is called to persist final status. This method should write to persistent storage.
	// An error indicates that the write didn't complete successfully.
	UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error

	// GetQuotas gets all possible quotas for a given namespace
	GetQuotas(namespace string) ([]corev1.ResourceQuota, error)
}

type quotaAccessor struct {
	client kubernetes.Interface

	// lister can list/get quota objects from a shared informer's cache
	lister corev1listers.ResourceQuotaLister

	// liveLookupCache holds the last few live lookups we've done to help amortize cost on repeated lookup failures.
	// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
	// We track the lookup result here so that for repeated requests, we don't look it up very often.
	liveLookupCache *lru.Cache
	liveTTL         time.Duration
	// updatedQuotas holds a cache of quotas that we've updated. This is used to pull the "really latest" during back to
	// back quota evaluations that touch the same quota doc. This only works because we can compare etcd resourceVersions
	// for the same resource as integers. Before this change: 22 updates with 12 conflicts. After this change: 15 updates with 0 conflicts.
	updatedQuotas *lru.Cache
}

// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects.
func newQuotaAccessor() (*quotaAccessor, error) {
	liveLookupCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	updatedCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}

	// client and lister will be set when SetInternalKubeClientSet and SetInternalKubeInformerFactory are invoked
	return &quotaAccessor{
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
		updatedQuotas:   updatedCache,
	}, nil
}

func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error {
	updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(context.TODO(), newQuota, metav1.UpdateOptions{})
	if err != nil {
		return err
	}

	key := newQuota.Namespace + "/" + newQuota.Name
	e.updatedQuotas.Add(key, updatedQuota)
	return nil
}

var etcdVersioner = etcd3.APIObjectVersioner{}

// checkCache compares the passed quota against the value in the look-aside cache and returns the newer one.
// If the cache is out of date, it deletes the stale entry. This only works because etcd resourceVersions
// are monotonically increasing integers.
func (e *quotaAccessor) checkCache(quota *corev1.ResourceQuota) *corev1.ResourceQuota {
	key := quota.Namespace + "/" + quota.Name
	uncastCachedQuota, ok := e.updatedQuotas.Get(key)
	if !ok {
		return quota
	}
	cachedQuota := uncastCachedQuota.(*corev1.ResourceQuota)

	if etcdVersioner.CompareResourceVersion(quota, cachedQuota) >= 0 {
		e.updatedQuotas.Remove(key)
		return quota
	}
	return cachedQuota
}

func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) {
	// determine if there are any quotas in this namespace
	// if there are no quotas, we don't need to do anything
	items, err := e.lister.ResourceQuotas(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error resolving quota: %v", err)
	}

	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := e.liveLookupCache.Get(namespace)
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already an in-flight List() for a given namespace, we should wait until
			// it is finished and the cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := e.client.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				return nil, err
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(e.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			e.liveLookupCache.Add(namespace, newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)
		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}
	}

	resourceQuotas := []corev1.ResourceQuota{}
	for i := range items {
		quota := items[i]
		quota = e.checkCache(quota)
		// always make a copy. We're going to muck around with this and we should never mutate the originals
		resourceQuotas = append(resourceQuotas, *quota)
	}

	return resourceQuotas, nil
}
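Because QuotaAccessor is an interface, the evaluator can be exercised without an API server; a minimal stub (hypothetical, for illustration only):

package resourcequota

import corev1 "k8s.io/api/core/v1"

// fakeQuotaAccessor serves quotas from memory and records status writes.
type fakeQuotaAccessor struct {
	quotas  map[string][]corev1.ResourceQuota
	updated []*corev1.ResourceQuota
}

func (f *fakeQuotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error {
	f.updated = append(f.updated, newQuota)
	return nil
}

func (f *fakeQuotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) {
	return f.quotas[namespace], nil
}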
26
pkg/apis/addtoscheme_quota_v1alpha2.go
Normal file
@@ -0,0 +1,26 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

import (
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
)

func init() {
	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
	AddToSchemes = append(AddToSchemes, quotav1alpha2.SchemeBuilder.AddToScheme)
}
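With that init in place, one call wires every registered group into a runtime scheme; a sketch (assuming pkg/apis exposes the conventional AddToScheme helper over AddToSchemes):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"kubesphere.io/kubesphere/pkg/apis"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs every registered SchemeBuilder, including quota/v1alpha2.
	if err := apis.AddToScheme(scheme); err != nil {
		panic(err)
	}
}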
@@ -23,8 +23,9 @@ We use a special type of secret as a credential for DevOps.
This file will not contain CRD, but the credential type constants and their fields.
*/
const (
-	CredentialFinalizerName = "finalizers.kubesphere.io/credential"
-	DevOpsCredentialPrefix  = "credential.devops.kubesphere.io/"
+	CredentialFinalizerName  = "finalizers.kubesphere.io/credential"
+	DevOpsCredentialPrefix   = "credential.devops.kubesphere.io/"
+	DevOpsCredentialDataHash = DevOpsCredentialPrefix + "datahash"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
5
pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go
generated
@@ -770,6 +770,11 @@ func (in *UserSpec) DeepCopy() *UserSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserStatus) DeepCopyInto(out *UserStatus) {
	*out = *in
+	if in.State != nil {
+		in, out := &in.State, &out.State
+		*out = new(UserState)
+		**out = **in
+	}
	if in.LastTransitionTime != nil {
		in, out := &in.LastTransitionTime, &out.LastTransitionTime
		*out = (*in).DeepCopy()
18
pkg/apis/quota/group.go
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package quota contains quota API versions
package quota
23
pkg/apis/quota/v1alpha2/doc.go
Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha2 contains API Schema definitions for the quotas v1alpha2 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/quota
// +k8s:defaulter-gen=TypeMeta
// +groupName=quota.kubesphere.io
package v1alpha2
46
pkg/apis/quota/v1alpha2/register.go
Normal file
@@ -0,0 +1,46 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// NOTE: Boilerplate only. Ignore this file.

// Package v1alpha2 contains API Schema definitions for the quotas v1alpha2 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/quota
// +k8s:defaulter-gen=TypeMeta
// +groupName=quota.kubesphere.io
package v1alpha2

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "quota.kubesphere.io", Version: "v1alpha2"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}

	// AddToScheme is required by pkg/client/...
	AddToScheme = SchemeBuilder.AddToScheme
)

// Resource is required by pkg/client/listers/...
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
99
pkg/apis/quota/v1alpha2/types.go
Normal file
@@ -0,0 +1,99 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	ResourceKindCluster      = "ResourceQuota"
	ResourcesSingularCluster = "resourcequota"
	ResourcesPluralCluster   = "resourcequotas"
)

func init() {
	SchemeBuilder.Register(&ResourceQuota{}, &ResourceQuotaList{})
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true

// ResourceQuota sets aggregate quota restrictions enforced per workspace
// +kubebuilder:resource:categories="quota",scope="Cluster"
// +kubebuilder:subresource:status
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec defines the desired quota
	Spec ResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`

	// Status defines the actual enforced quota and its current usage
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// ResourceQuotaSpec defines the desired quota restrictions
type ResourceQuotaSpec struct {
	// LabelSelector is used to select projects by label.
	LabelSelector map[string]string `json:"selector" protobuf:"bytes,1,opt,name=selector"`

	// Quota defines the desired quota
	Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"`
}

// ResourceQuotaStatus defines the actual enforced quota and its current usage
type ResourceQuotaStatus struct {
	// Total defines the actual enforced quota and its current usage across all projects
	Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"`

	// Namespaces slices the usage by project.
	Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
}

// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace
type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace

// ResourceQuotaStatusByNamespace gives status for a particular project
type ResourceQuotaStatusByNamespace struct {
	corev1.ResourceQuotaStatus `json:",inline"`

	// Namespace the project this status applies to
	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of ResourceQuota objects.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
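Putting the types together, a cluster-scoped quota that caps every project carrying a workspace label might be built like this (a sketch; the label key and values are illustrative):

package v1alpha2

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleQuota builds a ResourceQuota selecting all namespaces that
// carry the (illustrative) workspace label, capping their total CPU requests.
func exampleQuota() *ResourceQuota {
	return &ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "workspace-demo"},
		Spec: ResourceQuotaSpec{
			LabelSelector: map[string]string{"kubesphere.io/workspace": "demo"},
			Quota: corev1.ResourceQuotaSpec{
				Hard: corev1.ResourceList{
					corev1.ResourceRequestsCPU: resource.MustParse("10"),
				},
			},
		},
	}
}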
64
pkg/apis/quota/v1alpha2/types_test.go
Normal file
@@ -0,0 +1,64 @@
/*
Copyright 2019 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

import (
	"testing"

	"github.com/onsi/gomega"
	"golang.org/x/net/context"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func TestStorageResourceQuota(t *testing.T) {
	key := types.NamespacedName{
		Name: "foo",
	}
	created := &ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
		Spec: ResourceQuotaSpec{
			LabelSelector: map[string]string{},
		},
	}
	g := gomega.NewGomegaWithT(t)

	// Test Create
	fetched := &ResourceQuota{
		Spec: ResourceQuotaSpec{
			LabelSelector: map[string]string{},
		},
	}
	g.Expect(c.Create(context.TODO(), created)).To(gomega.Succeed())

	g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed())
	g.Expect(fetched).To(gomega.Equal(created))

	// Test Updating the Labels
	updated := fetched.DeepCopy()
	updated.Labels = map[string]string{"hello": "world"}
	g.Expect(c.Update(context.TODO(), updated)).To(gomega.Succeed())

	g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.Succeed())
	g.Expect(fetched).To(gomega.Equal(updated))

	// Test Delete
	g.Expect(c.Delete(context.TODO(), fetched)).To(gomega.Succeed())
	g.Expect(c.Get(context.TODO(), key, fetched)).ToNot(gomega.Succeed())
}
55
pkg/apis/quota/v1alpha2/v1alpha2_suite_test.go
Normal file
@@ -0,0 +1,55 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

import (
	"log"
	"os"
	"path/filepath"
	"testing"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

var cfg *rest.Config
var c client.Client

func TestMain(m *testing.M) {
	t := &envtest.Environment{
		CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")},
	}

	err := SchemeBuilder.AddToScheme(scheme.Scheme)
	if err != nil {
		log.Fatal(err)
	}

	if cfg, err = t.Start(); err != nil {
		log.Fatal(err)
	}

	if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil {
		log.Fatal(err)
	}

	code := m.Run()
	t.Stop()
	os.Exit(code)
}
167
pkg/apis/quota/v1alpha2/zz_generated.deepcopy.go
generated
Normal file
@@ -0,0 +1,167 @@
// +build !ignore_autogenerated

/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha2

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
func (in *ResourceQuota) DeepCopy() *ResourceQuota {
	if in == nil {
		return nil
	}
	out := new(ResourceQuota)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceQuota) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ResourceQuota, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
	if in == nil {
		return nil
	}
	out := new(ResourceQuotaList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
	*out = *in
	if in.LabelSelector != nil {
		in, out := &in.LabelSelector, &out.LabelSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Quota.DeepCopyInto(&out.Quota)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
	if in == nil {
		return nil
	}
	out := new(ResourceQuotaSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
	*out = *in
	in.Total.DeepCopyInto(&out.Total)
	if in.Namespaces != nil {
		in, out := &in.Namespaces, &out.Namespaces
		*out = make(ResourceQuotasStatusByNamespace, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
	if in == nil {
		return nil
	}
	out := new(ResourceQuotaStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) {
	*out = *in
	in.ResourceQuotaStatus.DeepCopyInto(&out.ResourceQuotaStatus)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace.
func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace {
	if in == nil {
		return nil
	}
	out := new(ResourceQuotaStatusByNamespace)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) {
	{
		in := &in
		*out = make(ResourceQuotasStatusByNamespace, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace.
func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace {
	if in == nil {
		return nil
	}
	out := new(ResourceQuotasStatusByNamespace)
	in.DeepCopyInto(out)
	return *out
}
@@ -31,17 +31,17 @@ const (
	ResourcePluralStrategy = "strategies"
)

-type StrategyType string
+type strategyType string

const (
	// Canary strategy type
-	CanaryType StrategyType = "Canary"
+	CanaryType strategyType = "Canary"

	// BlueGreen strategy type
-	BlueGreenType StrategyType = "BlueGreen"
+	BlueGreenType strategyType = "BlueGreen"

	// Mirror strategy type
-	Mirror StrategyType = "Mirror"
+	Mirror strategyType = "Mirror"
)

type StrategyPolicy string

@@ -60,7 +60,7 @@ const (
// StrategySpec defines the desired state of Strategy
type StrategySpec struct {
	// Strategy type
-	Type StrategyType `json:"type,omitempty"`
+	Type strategyType `json:"type,omitempty"`

	// Principal version, the one as reference version
	// label version value
@@ -30,6 +30,7 @@ import (
|
||||
devopsv1alpha3 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha3"
|
||||
iamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2"
|
||||
networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
|
||||
quotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2"
|
||||
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2"
|
||||
storagev1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/storage/v1alpha1"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1"
|
||||
@@ -45,6 +46,7 @@ type Interface interface {
|
||||
DevopsV1alpha3() devopsv1alpha3.DevopsV1alpha3Interface
|
||||
IamV1alpha2() iamv1alpha2.IamV1alpha2Interface
|
||||
NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface
|
||||
QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface
|
||||
ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface
|
||||
StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface
|
||||
TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface
|
||||
@@ -62,6 +64,7 @@ type Clientset struct {
|
||||
devopsV1alpha3 *devopsv1alpha3.DevopsV1alpha3Client
|
||||
iamV1alpha2 *iamv1alpha2.IamV1alpha2Client
|
||||
networkV1alpha1 *networkv1alpha1.NetworkV1alpha1Client
|
||||
quotaV1alpha2 *quotav1alpha2.QuotaV1alpha2Client
|
||||
servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client
|
||||
storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client
|
||||
tenantV1alpha1 *tenantv1alpha1.TenantV1alpha1Client
|
||||
@@ -99,6 +102,11 @@ func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface {
|
||||
return c.networkV1alpha1
|
||||
}
|
||||
|
||||
// QuotaV1alpha2 retrieves the QuotaV1alpha2Client
|
||||
func (c *Clientset) QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface {
|
||||
return c.quotaV1alpha2
|
||||
}
|
||||
|
||||
// ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client
|
||||
func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface {
|
||||
return c.servicemeshV1alpha2
|
||||
@@ -169,6 +177,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cs.quotaV1alpha2, err = quotav1alpha2.NewForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cs.servicemeshV1alpha2, err = servicemeshv1alpha2.NewForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -207,6 +219,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
|
||||
cs.devopsV1alpha3 = devopsv1alpha3.NewForConfigOrDie(c)
|
||||
cs.iamV1alpha2 = iamv1alpha2.NewForConfigOrDie(c)
|
||||
cs.networkV1alpha1 = networkv1alpha1.NewForConfigOrDie(c)
|
||||
cs.quotaV1alpha2 = quotav1alpha2.NewForConfigOrDie(c)
|
||||
cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c)
|
||||
cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c)
|
||||
cs.tenantV1alpha1 = tenantv1alpha1.NewForConfigOrDie(c)
|
||||
@@ -226,6 +239,7 @@ func New(c rest.Interface) *Clientset {
|
||||
cs.devopsV1alpha3 = devopsv1alpha3.New(c)
|
||||
cs.iamV1alpha2 = iamv1alpha2.New(c)
|
||||
cs.networkV1alpha1 = networkv1alpha1.New(c)
|
||||
cs.quotaV1alpha2 = quotav1alpha2.New(c)
|
||||
cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c)
|
||||
cs.storageV1alpha1 = storagev1alpha1.New(c)
|
||||
cs.tenantV1alpha1 = tenantv1alpha1.New(c)
|
||||
|
||||
@@ -37,6 +37,8 @@ import (
	fakeiamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2/fake"
	networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1"
	fakenetworkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1/fake"
+	quotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2"
+	fakequotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2/fake"
	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2"
	fakeservicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake"
	storagev1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/storage/v1alpha1"
@@ -126,6 +128,11 @@ func (c *Clientset) NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface {
	return &fakenetworkv1alpha1.FakeNetworkV1alpha1{Fake: &c.Fake}
}

+// QuotaV1alpha2 retrieves the QuotaV1alpha2Client
+func (c *Clientset) QuotaV1alpha2() quotav1alpha2.QuotaV1alpha2Interface {
+	return &fakequotav1alpha2.FakeQuotaV1alpha2{Fake: &c.Fake}
+}
+
// ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client
func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface {
	return &fakeservicemeshv1alpha2.FakeServicemeshV1alpha2{Fake: &c.Fake}
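The generated fakes make the new group testable without a cluster; a sketch using the fake Clientset (assuming the NewSimpleClientset constructor that client-gen emits alongside these files):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
)

func main() {
	// seed the fake tracker with one pre-existing quota object
	client := fake.NewSimpleClientset(&quotav1alpha2.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	})
	list, err := client.QuotaV1alpha2().ResourceQuotas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(list.Items)) // 1
}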
@@ -30,6 +30,7 @@ import (
	devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
+	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
@@ -47,6 +48,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
	devopsv1alpha3.AddToScheme,
	iamv1alpha2.AddToScheme,
	networkv1alpha1.AddToScheme,
+	quotav1alpha2.AddToScheme,
	servicemeshv1alpha2.AddToScheme,
	storagev1alpha1.AddToScheme,
	tenantv1alpha1.AddToScheme,

@@ -30,6 +30,7 @@ import (
	devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
	iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
+	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
@@ -47,6 +48,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
	devopsv1alpha3.AddToScheme,
	iamv1alpha2.AddToScheme,
	networkv1alpha1.AddToScheme,
+	quotav1alpha2.AddToScheme,
	servicemeshv1alpha2.AddToScheme,
	storagev1alpha1.AddToScheme,
	tenantv1alpha1.AddToScheme,
20
pkg/client/clientset/versioned/typed/quota/v1alpha2/doc.go
Normal file
@@ -0,0 +1,20 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

// This package has the automatically generated typed clients.
package v1alpha2
@@ -0,0 +1,20 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

// Package fake has the automatically generated clients.
package fake
@@ -0,0 +1,40 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package fake

import (
	rest "k8s.io/client-go/rest"
	testing "k8s.io/client-go/testing"
	v1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2"
)

type FakeQuotaV1alpha2 struct {
	*testing.Fake
}

func (c *FakeQuotaV1alpha2) ResourceQuotas() v1alpha2.ResourceQuotaInterface {
	return &FakeResourceQuotas{c}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeQuotaV1alpha2) RESTClient() rest.Interface {
	var ret *rest.RESTClient
	return ret
}
@@ -0,0 +1,133 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package fake

import (
	"context"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	labels "k8s.io/apimachinery/pkg/labels"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	testing "k8s.io/client-go/testing"
	v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
)

// FakeResourceQuotas implements ResourceQuotaInterface
type FakeResourceQuotas struct {
	Fake *FakeQuotaV1alpha2
}

var resourcequotasResource = schema.GroupVersionResource{Group: "quota.kubesphere.io", Version: "v1alpha2", Resource: "resourcequotas"}

var resourcequotasKind = schema.GroupVersionKind{Group: "quota.kubesphere.io", Version: "v1alpha2", Kind: "ResourceQuota"}

// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceQuota, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootGetAction(resourcequotasResource, name), &v1alpha2.ResourceQuota{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha2.ResourceQuota), err
}

// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
func (c *FakeResourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceQuotaList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootListAction(resourcequotasResource, resourcequotasKind, opts), &v1alpha2.ResourceQuotaList{})
	if obj == nil {
		return nil, err
	}

	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha2.ResourceQuotaList{ListMeta: obj.(*v1alpha2.ResourceQuotaList).ListMeta}
	for _, item := range obj.(*v1alpha2.ResourceQuotaList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested resourceQuotas.
func (c *FakeResourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewRootWatchAction(resourcequotasResource, opts))
}

// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (result *v1alpha2.ResourceQuota, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootCreateAction(resourcequotasResource, resourceQuota), &v1alpha2.ResourceQuota{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha2.ResourceQuota), err
}

// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateAction(resourcequotasResource, resourceQuota), &v1alpha2.ResourceQuota{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha2.ResourceQuota), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateSubresourceAction(resourcequotasResource, "status", resourceQuota), &v1alpha2.ResourceQuota{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha2.ResourceQuota), err
}

// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewRootDeleteAction(resourcequotasResource, name), &v1alpha2.ResourceQuota{})
	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	action := testing.NewRootDeleteCollectionAction(resourcequotasResource, listOpts)

	_, err := c.Fake.Invokes(action, &v1alpha2.ResourceQuotaList{})
	return err
}

// Patch applies the patch and returns the patched resourceQuota.
func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootPatchSubresourceAction(resourcequotasResource, name, pt, data, subresources...), &v1alpha2.ResourceQuota{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha2.ResourceQuota), err
}
@@ -0,0 +1,21 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package v1alpha2

type ResourceQuotaExpansion interface{}
@@ -0,0 +1,89 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by client-gen. DO NOT EDIT.

package v1alpha2

import (
	rest "k8s.io/client-go/rest"
	v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
)

type QuotaV1alpha2Interface interface {
	RESTClient() rest.Interface
	ResourceQuotasGetter
}

// QuotaV1alpha2Client is used to interact with features provided by the quota.kubesphere.io group.
type QuotaV1alpha2Client struct {
	restClient rest.Interface
}

func (c *QuotaV1alpha2Client) ResourceQuotas() ResourceQuotaInterface {
	return newResourceQuotas(c)
}

// NewForConfig creates a new QuotaV1alpha2Client for the given config.
func NewForConfig(c *rest.Config) (*QuotaV1alpha2Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &QuotaV1alpha2Client{client}, nil
}

// NewForConfigOrDie creates a new QuotaV1alpha2Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *QuotaV1alpha2Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new QuotaV1alpha2Client for the given RESTClient.
func New(c rest.Interface) *QuotaV1alpha2Client {
	return &QuotaV1alpha2Client{c}
}

func setConfigDefaults(config *rest.Config) error {
	gv := v1alpha2.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *QuotaV1alpha2Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
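With the typed client in place, callers reach the cluster-scoped quotas through the usual client-gen flow; a sketch (assuming in-cluster config):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/quota/v1alpha2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := quotav1alpha2.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// ResourceQuotas are cluster-scoped, so no namespace argument is needed.
	quotas, err := client.ResourceQuotas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, q := range quotas.Items {
		fmt.Println(q.Name)
	}
}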
@@ -0,0 +1,184 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
|
||||
scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
|
||||
)
|
||||
|
||||
// ResourceQuotasGetter has a method to return a ResourceQuotaInterface.
|
||||
// A group's client should implement this interface.
|
||||
type ResourceQuotasGetter interface {
|
||||
ResourceQuotas() ResourceQuotaInterface
|
||||
}
|
||||
|
||||
// ResourceQuotaInterface has methods to work with ResourceQuota resources.
|
||||
type ResourceQuotaInterface interface {
|
||||
Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (*v1alpha2.ResourceQuota, error)
|
||||
Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error)
|
||||
UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (*v1alpha2.ResourceQuota, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceQuota, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceQuotaList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error)
|
||||
ResourceQuotaExpansion
|
||||
}
|
||||
|
||||
// resourceQuotas implements ResourceQuotaInterface
|
||||
type resourceQuotas struct {
|
||||
client rest.Interface
|
||||
}
|
||||
|
||||
// newResourceQuotas returns a ResourceQuotas
|
||||
func newResourceQuotas(c *QuotaV1alpha2Client) *resourceQuotas {
|
||||
return &resourceQuotas{
|
||||
client: c.RESTClient(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
|
||||
func (c *resourceQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceQuota, err error) {
|
||||
result = &v1alpha2.ResourceQuota{}
|
||||
err = c.client.Get().
|
||||
Resource("resourcequotas").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
|
||||
func (c *resourceQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceQuotaList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha2.ResourceQuotaList{}
|
||||
err = c.client.Get().
|
||||
Resource("resourcequotas").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested resourceQuotas.
|
||||
func (c *resourceQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Resource("resourcequotas").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
|
||||
func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.CreateOptions) (result *v1alpha2.ResourceQuota, err error) {
|
||||
result = &v1alpha2.ResourceQuota{}
|
||||
err = c.client.Post().
|
||||
Resource("resourcequotas").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(resourceQuota).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
|
||||
func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) {
|
||||
result = &v1alpha2.ResourceQuota{}
|
||||
err = c.client.Put().
|
||||
Resource("resourcequotas").
|
||||
Name(resourceQuota.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(resourceQuota).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1alpha2.ResourceQuota, opts v1.UpdateOptions) (result *v1alpha2.ResourceQuota, err error) {
|
||||
result = &v1alpha2.ResourceQuota{}
|
||||
err = c.client.Put().
|
||||
Resource("resourcequotas").
|
||||
Name(resourceQuota.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(resourceQuota).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
|
||||
func (c *resourceQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Resource("resourcequotas").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Resource("resourcequotas").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched resourceQuota.
|
||||
func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceQuota, err error) {
|
||||
result = &v1alpha2.ResourceQuota{}
|
||||
err = c.client.Patch(pt).
|
||||
Resource("resourcequotas").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
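For orientation, here is a minimal sketch of how this generated typed client is reached in practice. It assumes only that a kubeconfig is available at the default location (the path and the in-cluster alternative are illustrative, not part of this commit); note the resource is cluster-scoped, since the generated client never calls Namespace():

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
)

func main() {
	// Assumption: a kubeconfig at the default location; inside a pod,
	// rest.InClusterConfig() would produce the same *rest.Config.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List the KubeSphere ResourceQuotas through the generated typed client.
	quotas, err := client.QuotaV1alpha2().ResourceQuotas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, q := range quotas.Items {
		fmt.Println(q.Name)
	}
}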
@@ -34,6 +34,7 @@ import (
	iam "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam"
	internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
	network "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network"
	quota "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota"
	servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh"
	storage "kubesphere.io/kubesphere/pkg/client/informers/externalversions/storage"
	tenant "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant"
@@ -185,6 +186,7 @@ type SharedInformerFactory interface {
	Devops() devops.Interface
	Iam() iam.Interface
	Network() network.Interface
	Quota() quota.Interface
	Servicemesh() servicemesh.Interface
	Storage() storage.Interface
	Tenant() tenant.Interface
@@ -211,6 +213,10 @@ func (f *sharedInformerFactory) Network() network.Interface {
	return network.New(f, f.namespace, f.tweakListOptions)
}

func (f *sharedInformerFactory) Quota() quota.Interface {
	return quota.New(f, f.namespace, f.tweakListOptions)
}

func (f *sharedInformerFactory) Servicemesh() servicemesh.Interface {
	return servicemesh.New(f, f.namespace, f.tweakListOptions)
}

@@ -29,6 +29,7 @@ import (
	v1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
	v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
	networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	storagev1alpha1 "kubesphere.io/kubesphere/pkg/apis/storage/v1alpha1"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
@@ -118,6 +119,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
	case networkv1alpha1.SchemeGroupVersion.WithResource("namespacenetworkpolicies"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Network().V1alpha1().NamespaceNetworkPolicies().Informer()}, nil

		// Group=quota.kubesphere.io, Version=v1alpha2
	case quotav1alpha2.SchemeGroupVersion.WithResource("resourcequotas"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Quota().V1alpha2().ResourceQuotas().Informer()}, nil

		// Group=servicemesh.kubesphere.io, Version=v1alpha2
	case servicemeshv1alpha2.SchemeGroupVersion.WithResource("servicepolicies"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().ServicePolicies().Informer()}, nil

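The new Quota() hook above is what makes the generated informer reachable from the shared factory. A minimal sketch of wiring it up, assuming a versioned clientset has already been constructed (the 30-second resync is an arbitrary illustrative value):

package main

import (
	"time"

	"k8s.io/client-go/tools/cache"

	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	externalversions "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)

func startQuotaInformer(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Quota().V1alpha2().ResourceQuotas().Informer()

	// Start every informer requested from the factory so far, then wait for
	// the local cache to fill before relying on listers.
	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		panic("timed out waiting for ResourceQuota cache sync")
	}
}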
46
pkg/client/informers/externalversions/quota/interface.go
Normal file
@@ -0,0 +1,46 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package quota

import (
	internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
	v1alpha2 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota/v1alpha2"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	// V1alpha2 provides access to shared informers for resources in V1alpha2.
	V1alpha2() v1alpha2.Interface
}

type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1alpha2 returns a new v1alpha2.Interface.
func (g *group) V1alpha2() v1alpha2.Interface {
	return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions)
}
@@ -0,0 +1,45 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha2

import (
	internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// ResourceQuotas returns a ResourceQuotaInformer.
	ResourceQuotas() ResourceQuotaInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// ResourceQuotas returns a ResourceQuotaInformer.
func (v *version) ResourceQuotas() ResourceQuotaInformer {
	return &resourceQuotaInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
@@ -0,0 +1,89 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha2

import (
	"context"
	time "time"

	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	cache "k8s.io/client-go/tools/cache"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces"
	v1alpha2 "kubesphere.io/kubesphere/pkg/client/listers/quota/v1alpha2"
)

// ResourceQuotaInformer provides access to a shared informer and lister for
// ResourceQuotas.
type ResourceQuotaInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1alpha2.ResourceQuotaLister
}

type resourceQuotaInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// NewResourceQuotaInformer constructs a new informer for ResourceQuota type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredResourceQuotaInformer(client, resyncPeriod, indexers, nil)
}

// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceQuotaInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.QuotaV1alpha2().ResourceQuotas().List(context.TODO(), options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.QuotaV1alpha2().ResourceQuotas().Watch(context.TODO(), options)
			},
		},
		&quotav1alpha2.ResourceQuota{},
		resyncPeriod,
		indexers,
	)
}

func (f *resourceQuotaInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredResourceQuotaInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&quotav1alpha2.ResourceQuota{}, f.defaultInformer)
}

func (f *resourceQuotaInformer) Lister() v1alpha2.ResourceQuotaLister {
	return v1alpha2.NewResourceQuotaLister(f.Informer().GetIndexer())
}
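NewFilteredResourceQuotaInformer is the variant that accepts a tweakListOptions hook, which lets the informer filter server-side. A short sketch of using it directly, where the label key is purely illustrative:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog"

	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
	quotainformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota/v1alpha2"
)

func watchLabelledQuotas(client versioned.Interface, stopCh <-chan struct{}) {
	// Every List/Watch issued by this informer carries the selector, so
	// unmatched objects never reach the local cache ("kubesphere.io/workspace"
	// is a hypothetical label key used only for this example).
	tweak := func(options *metav1.ListOptions) {
		options.LabelSelector = "kubesphere.io/workspace"
	}
	informer := quotainformers.NewFilteredResourceQuotaInformer(client, 0, cache.Indexers{}, tweak)
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			q := obj.(*quotav1alpha2.ResourceQuota)
			klog.Infof("observed ResourceQuota %s", q.Name)
		},
	})
	informer.Run(stopCh)
}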
23
pkg/client/listers/quota/v1alpha2/expansion_generated.go
Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha2

// ResourceQuotaListerExpansion allows custom methods to be added to
// ResourceQuotaLister.
type ResourceQuotaListerExpansion interface{}
65
pkg/client/listers/quota/v1alpha2/resourcequota.go
Normal file
@@ -0,0 +1,65 @@
/*
Copyright 2020 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package v1alpha2

import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	v1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
)

// ResourceQuotaLister helps list ResourceQuotas.
type ResourceQuotaLister interface {
	// List lists all ResourceQuotas in the indexer.
	List(selector labels.Selector) (ret []*v1alpha2.ResourceQuota, err error)
	// Get retrieves the ResourceQuota from the index for a given name.
	Get(name string) (*v1alpha2.ResourceQuota, error)
	ResourceQuotaListerExpansion
}

// resourceQuotaLister implements the ResourceQuotaLister interface.
type resourceQuotaLister struct {
	indexer cache.Indexer
}

// NewResourceQuotaLister returns a new ResourceQuotaLister.
func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister {
	return &resourceQuotaLister{indexer: indexer}
}

// List lists all ResourceQuotas in the indexer.
func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceQuota, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha2.ResourceQuota))
	})
	return ret, err
}

// Get retrieves the ResourceQuota from the index for a given name.
func (s *resourceQuotaLister) Get(name string) (*v1alpha2.ResourceQuota, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha2.Resource("resourcequota"), name)
	}
	return obj.(*v1alpha2.ResourceQuota), nil
}
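The lister is the read path on top of the informer's indexer. A brief sketch of how it is used once the cache has synced — the quota name queried here is hypothetical:

package main

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/klog"

	quotainformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/quota/v1alpha2"
)

func dumpCachedQuotas(quotaInformer quotainformers.ResourceQuotaInformer) {
	// Reads are served from the in-memory indexer, not the API server.
	quotas, err := quotaInformer.Lister().List(labels.Everything())
	if err != nil {
		klog.Error(err)
		return
	}
	for _, q := range quotas {
		klog.Infof("cached ResourceQuota: %s", q.Name)
	}

	// Get is keyed by name only, matching the cluster-scoped lister above.
	if q, err := quotaInformer.Lister().Get("example-quota"); err == nil {
		klog.Infof("found %s", q.Name)
	}
}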
@@ -18,45 +18,239 @@ package application

import (
	"context"
	"fmt"
	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	v1beta12 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/client-go/util/retry"
	"k8s.io/klog"
	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
	"sigs.k8s.io/application/api/v1beta1"
	appv1beta1 "sigs.k8s.io/application/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
	"time"
)

// Add creates a new Application Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
// ApplicationReconciler reconciles an Application object
type ApplicationReconciler struct {
	client.Client
	Mapper              meta.RESTMapper
	Scheme              *runtime.Scheme
	ApplicationSelector labels.Selector
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileApplication{Client: mgr.GetClient(), scheme: mgr.GetScheme(),
		recorder: mgr.GetEventRecorderFor("application-controller")}
func (r *ApplicationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	var app appv1beta1.Application
	err := r.Get(context.Background(), req.NamespacedName, &app)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// If a label selector was given, only reconcile matched applications;
	// match against both annotations and labels
	if !r.ApplicationSelector.Empty() {
		if !r.ApplicationSelector.Matches(labels.Set(app.Labels)) &&
			!r.ApplicationSelector.Matches(labels.Set(app.Annotations)) {
			return ctrl.Result{}, err
		}
	}

	// Application is in the process of being deleted, so no need to do anything.
	if app.DeletionTimestamp != nil {
		return ctrl.Result{}, nil
	}

	resources, errs := r.updateComponents(context.Background(), &app)
	newApplicationStatus := r.getNewApplicationStatus(context.Background(), &app, resources, &errs)

	newApplicationStatus.ObservedGeneration = app.Generation
	if equality.Semantic.DeepEqual(newApplicationStatus, &app.Status) {
		return ctrl.Result{}, nil
	}

	err = r.updateApplicationStatus(context.Background(), req.NamespacedName, newApplicationStatus)
	return ctrl.Result{}, err
}
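The selector gate at the top of Reconcile is what the controller-manager's ApplicationSelector option feeds. A minimal sketch of how such a selector can be built; the flag plumbing itself is elided here, and the expression shown mirrors the option's documented "kubesphere.io/creator=" example:

package application

import (
	"k8s.io/apimachinery/pkg/labels"
)

func buildApplicationSelector(expr string) (labels.Selector, error) {
	// An empty expression yields labels.Everything(), whose Empty() is true,
	// so Reconcile treats it as "reconcile all applications".
	if expr == "" {
		return labels.Everything(), nil
	}
	// e.g. "kubesphere.io/creator=" matches applications carrying that label key.
	return labels.Parse(expr)
}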
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("application-controller", mgr, controller.Options{Reconciler: r})
func (r *ApplicationReconciler) updateComponents(ctx context.Context, app *appv1beta1.Application) ([]*unstructured.Unstructured, []error) {
	var errs []error
	resources := r.fetchComponentListResources(ctx, app.Spec.ComponentGroupKinds, app.Spec.Selector, app.Namespace, &errs)

	if app.Spec.AddOwnerRef {
		ownerRef := metav1.NewControllerRef(app, appv1beta1.GroupVersion.WithKind("Application"))
		*ownerRef.Controller = false
		if err := r.setOwnerRefForResources(ctx, *ownerRef, resources); err != nil {
			errs = append(errs, err)
		}
	}
	return resources, errs
}

func (r *ApplicationReconciler) getNewApplicationStatus(ctx context.Context, app *appv1beta1.Application, resources []*unstructured.Unstructured, errList *[]error) *appv1beta1.ApplicationStatus {
	objectStatuses := r.objectStatuses(ctx, resources, errList)
	errs := utilerrors.NewAggregate(*errList)

	aggReady, countReady := aggregateReady(objectStatuses)

	newApplicationStatus := app.Status.DeepCopy()
	newApplicationStatus.ComponentList = appv1beta1.ComponentList{
		Objects: objectStatuses,
	}
	newApplicationStatus.ComponentsReady = fmt.Sprintf("%d/%d", countReady, len(objectStatuses))
	if errs != nil {
		setReadyUnknownCondition(newApplicationStatus, "ComponentsReadyUnknown", "failed to aggregate all components' statuses, check the Error condition for details")
	} else if aggReady {
		setReadyCondition(newApplicationStatus, "ComponentsReady", "all components ready")
	} else {
		setNotReadyCondition(newApplicationStatus, "ComponentsNotReady", fmt.Sprintf("%d components not ready", len(objectStatuses)-countReady))
	}

	if errs != nil {
		setErrorCondition(newApplicationStatus, "ErrorSeen", errs.Error())
	} else {
		clearErrorCondition(newApplicationStatus)
	}

	return newApplicationStatus
}

func (r *ApplicationReconciler) fetchComponentListResources(ctx context.Context, groupKinds []metav1.GroupKind, selector *metav1.LabelSelector, namespace string, errs *[]error) []*unstructured.Unstructured {
	var resources []*unstructured.Unstructured

	if selector == nil {
		klog.V(2).Info("No selector is specified")
		return resources
	}

	for _, gk := range groupKinds {
		mapping, err := r.Mapper.RESTMapping(schema.GroupKind{
			Group: appv1beta1.StripVersion(gk.Group),
			Kind:  gk.Kind,
		})
		if err != nil {
			klog.V(2).Info("NoMappingForGK", "gk", gk.String())
			continue
		}

		list := &unstructured.UnstructuredList{}
		list.SetGroupVersionKind(mapping.GroupVersionKind)
		if err = r.Client.List(ctx, list, client.InNamespace(namespace), client.MatchingLabels(selector.MatchLabels)); err != nil {
			klog.Error(err, "unable to list resources for GVK", "gvk", mapping.GroupVersionKind)
			*errs = append(*errs, err)
			continue
		}

		for _, u := range list.Items {
			resource := u
			resources = append(resources, &resource)
		}
	}
	return resources
}

func (r *ApplicationReconciler) setOwnerRefForResources(ctx context.Context, ownerRef metav1.OwnerReference, resources []*unstructured.Unstructured) error {
	for _, resource := range resources {
		ownerRefs := resource.GetOwnerReferences()
		ownerRefFound := false
		for i, refs := range ownerRefs {
			if ownerRef.Kind == refs.Kind &&
				ownerRef.APIVersion == refs.APIVersion &&
				ownerRef.Name == refs.Name {
				ownerRefFound = true
				if ownerRef.UID != refs.UID {
					ownerRefs[i] = ownerRef
				}
			}
		}

		if !ownerRefFound {
			ownerRefs = append(ownerRefs, ownerRef)
		}
		resource.SetOwnerReferences(ownerRefs)
		err := r.Client.Update(ctx, resource)
		if err != nil {
			// We log this error, but we continue and try to set the ownerRefs on the other resources.
			klog.Error(err, "ErrorSettingOwnerRef", "gvk", resource.GroupVersionKind().String(),
				"namespace", resource.GetNamespace(), "name", resource.GetName())
		}
	}
	return nil
}

func (r *ApplicationReconciler) objectStatuses(ctx context.Context, resources []*unstructured.Unstructured, errs *[]error) []appv1beta1.ObjectStatus {
	var objectStatuses []appv1beta1.ObjectStatus
	for _, resource := range resources {
		os := appv1beta1.ObjectStatus{
			Group: resource.GroupVersionKind().Group,
			Kind:  resource.GetKind(),
			Name:  resource.GetName(),
			Link:  resource.GetSelfLink(),
		}
		s, err := status(resource)
		if err != nil {
			klog.Error(err, "unable to compute status for resource", "gvk", resource.GroupVersionKind().String(),
				"namespace", resource.GetNamespace(), "name", resource.GetName())
			*errs = append(*errs, err)
		}
		os.Status = s
		objectStatuses = append(objectStatuses, os)
	}
	return objectStatuses
}

func aggregateReady(objectStatuses []appv1beta1.ObjectStatus) (bool, int) {
	countReady := 0
	for _, os := range objectStatuses {
		if os.Status == StatusReady {
			countReady++
		}
	}
	if countReady == len(objectStatuses) {
		return true, countReady
	}
	return false, countReady
}

func (r *ApplicationReconciler) updateApplicationStatus(ctx context.Context, nn types.NamespacedName, status *appv1beta1.ApplicationStatus) error {
	if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		original := &appv1beta1.Application{}
		if err := r.Get(ctx, nn, original); err != nil {
			return err
		}
		original.Status = *status
		if err := r.Client.Status().Update(ctx, original); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return fmt.Errorf("failed to update status of Application %s/%s: %v", nn.Namespace, nn.Name, err)
	}
	return nil
}

func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
	c, err := ctrl.NewControllerManagedBy(mgr).
		Named("application-controller").
		For(&appv1beta1.Application{}).Build(r)

	if err != nil {
		return err
	}
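For context, a condensed sketch of wiring the new reconciler into a controller-runtime manager from within this package, mirroring what the test suite below does (the selector expression is the option's documented example, not a requirement):

package application

import (
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
)

func runApplicationController() error {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		return err
	}

	selector, err := labels.Parse("kubesphere.io/creator=")
	if err != nil {
		return err
	}

	r := &ApplicationReconciler{
		Client:              mgr.GetClient(),
		Scheme:              mgr.GetScheme(),
		Mapper:              mgr.GetRESTMapper(),
		ApplicationSelector: selector,
	}
	if err := r.SetupWithManager(mgr); err != nil {
		return err
	}
	// Block until the stop channel from the signal handler closes.
	return mgr.Start(ctrl.SetupSignalHandler())
}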
@@ -98,46 +292,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
	return nil
}

var _ reconcile.Reconciler = &ReconcileApplication{}

// ReconcileApplication reconciles a Workspace object
type ReconcileApplication struct {
	client.Client
	scheme   *runtime.Scheme
	recorder record.EventRecorder
}

// +kubebuilder:rbac:groups=app.k8s.io,resources=applications,verbs=get;list;watch;create;update;patch;delete
func (r *ReconcileApplication) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the Application instance
	ctx := context.Background()
	app := &v1beta1.Application{}
	err := r.Get(ctx, request.NamespacedName, app)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.Errorf("application %s not found in namespace %s", request.Name, request.Namespace)
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}

	// add specified annotation for app when triggered by sub-resources,
	// so the application in sigs.k8s.io can reconcile to update status
	annotations := app.GetObjectMeta().GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations["kubesphere.io/last-updated"] = time.Now().String()
	app.SetAnnotations(annotations)
	err = r.Update(ctx, app)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.V(4).Infof("application %s has been deleted during update in namespace %s", request.Name, request.Namespace)
			return reconcile.Result{}, nil
		}
	}
	return reconcile.Result{}, nil
}
var _ reconcile.Reconciler = &ApplicationReconciler{}

func isApp(obs ...metav1.Object) bool {
	for _, o := range obs {

@@ -19,6 +19,10 @@ package application

import (
	"context"
	"fmt"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	v1 "k8s.io/api/apps/v1"
@@ -27,71 +31,131 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"kubesphere.io/kubesphere/pkg/controller/utils/servicemesh"
	"sigs.k8s.io/application/api/v1beta1"
	"time"
)

const (
	applicationName = "bookinfo"
	serviceName     = "productpage"
	timeout         = time.Second * 30
	interval        = time.Second * 2
)

var replicas = int32(2)
var _ = Describe("Application", func() {

	const timeout = time.Second * 30
	const interval = time.Second * 1

var _ = Context("Inside of a new namespace", func() {
	ctx := context.TODO()
	ns := SetupTest(ctx)

	service := newService("productpage")
	app := newAppliation(service)
	deployments := []*v1.Deployment{newDeployments(service, "v1")}

	BeforeEach(func() {

		// Create application service and deployment
		Expect(k8sClient.Create(ctx, app)).Should(Succeed())
		Expect(k8sClient.Create(ctx, service)).Should(Succeed())
		for i := range deployments {
			deployment := deployments[i]
			Expect(k8sClient.Create(ctx, deployment)).Should(Succeed())
	Describe("Application", func() {
		applicationLabels := map[string]string{
			"app.kubernetes.io/name":    "bookinfo",
			"app.kubernetes.io/version": "1",
		}
	})

	// Add Tests for OpenAPI validation (or additional CRD features) specified in
	// your API definition.
	// Avoid adding tests for vanilla CRUD operations because they would
	// test Kubernetes API server, which isn't the goal here.
	Context("Application Controller", func() {
		It("Should create successfully", func() {
		BeforeEach(func() {
			By("create deployment,service,application objects")
			service := newService(serviceName, ns.Name, applicationLabels)
			deployments := []*v1.Deployment{newDeployments(serviceName, ns.Name, applicationLabels, "v1")}
			app := newApplication(applicationName, ns.Name, applicationLabels)

			By("Reconcile Application successfully")
			// application should have "kubesphere.io/last-updated" annotation
			Eventually(func() bool {
				app := &v1beta1.Application{}
				_ = k8sClient.Get(ctx, types.NamespacedName{Name: service.Labels[servicemesh.ApplicationNameLabel], Namespace: metav1.NamespaceDefault}, app)
				time, ok := app.Annotations["kubesphere.io/last-updated"]
				return len(time) > 0 && ok
			}, timeout, interval).Should(BeTrue())
			Expect(k8sClient.Create(ctx, service.DeepCopy())).Should(Succeed())
			for i := range deployments {
				deployment := deployments[i]
				Expect(k8sClient.Create(ctx, deployment.DeepCopy())).Should(Succeed())
			}
			Expect(k8sClient.Create(ctx, app)).Should(Succeed())
		})

		Context("Application Controller", func() {
			It("Should not reconcile application", func() {
				By("update application labels")
				application := &v1beta1.Application{}

				err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
				Expect(err).Should(Succeed())

				updateApplication := func(object interface{}) {
					newApp := object.(*v1beta1.Application)
					newApp.Labels["kubesphere.io/creator"] = ""
				}

				updated, err := updateWithRetries(k8sClient, ctx, application.Namespace, applicationName, updateApplication, 1*time.Second, 5*time.Second)
				Expect(updated).Should(BeTrue())

				Eventually(func() bool {

					err = k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)

					// application status field should not be populated with selected deployments and services
					return len(application.Status.ComponentList.Objects) == 0
				}, timeout, interval).Should(BeTrue())

			})

			It("Should reconcile application successfully", func() {

				By("check if application status has been updated by controller")
				application := &v1beta1.Application{}

				Eventually(func() bool {
					err := k8sClient.Get(ctx, types.NamespacedName{Name: applicationName, Namespace: ns.Name}, application)
					Expect(err).Should(Succeed())

					// application status field should be populated by controller
					return len(application.Status.ComponentList.Objects) > 0
				}, timeout, interval).Should(BeTrue())

			})
		})
	})
})

func newDeployments(service *corev1.Service, version string) *v1.Deployment {
	lbs := service.Labels
	lbs["version"] = version
type UpdateObjectFunc func(obj interface{})

func updateWithRetries(client client.Client, ctx context.Context, namespace, name string, updateFunc UpdateObjectFunc, interval, timeout time.Duration) (bool, error) {
	var updateErr error

	pollErr := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
		app := &v1beta1.Application{}
		if err = client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, app); err != nil {
			return false, err
		}

		updateFunc(app)
		if err = client.Update(ctx, app); err == nil {
			return true, nil
		}

		updateErr = err
		return false, nil
	})

	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("couldn't apply the provided update to object %q: %v", name, updateErr)
		return false, pollErr
	}
	return true, nil
}

func newDeployments(deploymentName, namespace string, labels map[string]string, version string) *v1.Deployment {
	labels["app"] = deploymentName
	labels["version"] = version

	deployment := &v1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%s", service.Name, version),
			Namespace: metav1.NamespaceDefault,
			Labels:    lbs,
			Name:        fmt.Sprintf("%s-%s", deploymentName, version),
			Namespace:   namespace,
			Labels:      labels,
			Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
		},
		Spec: v1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: lbs,
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      lbs,
					Annotations: service.Annotations,
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
@@ -130,16 +194,14 @@ func newDeployments(service *corev1.Service, version string) *v1.Deployment {
	return deployment
}

func newService(name string) *corev1.Service {
func newService(serviceName, namespace string, labels map[string]string) *corev1.Service {
	labels["app"] = serviceName

	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				"app.kubernetes.io/name":    "bookinfo",
				"app.kubernetes.io/version": "1",
				"app":                       name,
			},
			Name:      serviceName,
			Namespace: namespace,
			Labels:    labels,
			Annotations: map[string]string{
				"servicemesh.kubesphere.io/enabled": "true",
			},
@@ -162,24 +224,20 @@ func newService(name string) *corev1.Service {
					Protocol: corev1.ProtocolTCP,
				},
			},
			Selector: map[string]string{
				"app.kubernetes.io/name":    "bookinfo",
				"app.kubernetes.io/version": "1",
				"app":                       "foo",
			},
			Type: corev1.ServiceTypeClusterIP,
			Selector: labels,
			Type:     corev1.ServiceTypeClusterIP,
		},
		Status: corev1.ServiceStatus{},
	}
	return svc
}

func newAppliation(service *corev1.Service) *v1beta1.Application {
func newApplication(applicationName, namespace string, labels map[string]string) *v1beta1.Application {
	app := &v1beta1.Application{
		ObjectMeta: metav1.ObjectMeta{
			Name:      service.Labels[servicemesh.ApplicationNameLabel],
			Namespace: metav1.NamespaceDefault,
			Labels:    service.Labels,
			Name:        applicationName,
			Namespace:   namespace,
			Labels:      labels,
			Annotations: map[string]string{servicemesh.ServiceMeshEnabledAnnotation: "true"},
		},
		Spec: v1beta1.ApplicationSpec{
@@ -193,6 +251,9 @@ func newAppliation(service *corev1.Service) *v1beta1.Application {
				Kind: "Deployment",
			},
		},
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			AddOwnerRef: true,
		},
	}

@@ -17,12 +17,15 @@ limitations under the License.
package application

import (
	"github.com/onsi/gomega/gexec"
	"context"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/klog"
	"k8s.io/client-go/rest"
	"k8s.io/klog/klogr"
	"kubesphere.io/kubesphere/pkg/apis"
	"os"
	"math/rand"
	"path/filepath"
	appv1beta1 "sigs.k8s.io/application/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
@@ -40,8 +43,8 @@ import (
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment

func TestApplicationController(t *testing.T) {
@@ -55,44 +58,23 @@ var _ = BeforeSuite(func(done Done) {
	logf.SetLogger(klogr.New())

	By("bootstrapping test environment")
	t := true
	if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
		testEnv = &envtest.Environment{
			UseExistingCluster: &t,
		}
	} else {
		testEnv = &envtest.Environment{
			CRDDirectoryPaths:        []string{filepath.Join("..", "..", "..", "config", "crds")},
			AttachControlPlaneOutput: false,
		}
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:        []string{filepath.Join("..", "..", "..", "config", "crds")},
		AttachControlPlaneOutput: false,
	}

	sch := scheme.Scheme
	err := appv1beta1.AddToScheme(sch)
	Expect(err).NotTo(HaveOccurred())
	err = apis.AddToScheme(sch)
	Expect(err).NotTo(HaveOccurred())

	cfg, err := testEnv.Start()
	var err error
	cfg, err = testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())

	k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
		Scheme:             sch,
		MetricsBindAddress: "0",
	})
	err = appv1beta1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())
	err = apis.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).ToNot(HaveOccurred())

	err = Add(k8sManager)
	Expect(err).ToNot(HaveOccurred())

	go func() {
		err = k8sManager.Start(ctrl.SetupSignalHandler())
		klog.Error(err)
		Expect(err).ToNot(HaveOccurred())
	}()

	k8sClient = k8sManager.GetClient()
	Expect(k8sClient).ToNot(BeNil())

	close(done)
@@ -100,7 +82,69 @@ var _ = BeforeSuite(func(done Done) {

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred())
})

// SetupTest will set up a testing environment.
// This includes:
// * creating a Namespace to be used during the test
// * starting the application controller
// * stopping the application controller after the test ends
// Call this function at the start of each of your tests.
func SetupTest(ctx context.Context) *corev1.Namespace {
	var stopCh chan struct{}
	ns := &corev1.Namespace{}

	BeforeEach(func() {
		stopCh = make(chan struct{})
		*ns = corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
		}

		err := k8sClient.Create(ctx, ns)
		Expect(err).NotTo(HaveOccurred(), "failed to create a test namespace")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
		Expect(err).NotTo(HaveOccurred(), "failed to create a manager")

		selector, _ := labels.Parse("app.kubernetes.io/name,!kubesphere.io/creator")

		reconciler := &ApplicationReconciler{
			Client:              mgr.GetClient(),
			Scheme:              mgr.GetScheme(),
			Mapper:              mgr.GetRESTMapper(),
			ApplicationSelector: selector,
		}
		err = reconciler.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup application reconciler")

		go func() {
			err = mgr.Start(stopCh)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		close(stopCh)

		err := k8sClient.Delete(ctx, ns)
		Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
	})

	return ns
}

func init() {
	rand.Seed(time.Now().UnixNano())
}

var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")

func randStringRunes(n int) string {
	b := make([]rune, n)
	for i := range b {
		b[i] = letterRunes[rand.Intn(len(letterRunes))]
	}
	return string(b)
}

@@ -1,7 +1,7 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0

package controllers
package application

import (
	corev1 "k8s.io/api/core/v1"
@@ -1,7 +1,7 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0

package controllers
package application

import (
	"strings"
@@ -21,7 +21,6 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"reflect"
	"sync"
@@ -33,7 +32,6 @@ import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
@@ -44,6 +42,7 @@ import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/retry"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	fedv1b1 "sigs.k8s.io/kubefed/pkg/apis/core/v1beta1"
@@ -56,12 +55,12 @@ import (
)

// Cluster controller only runs under multicluster mode. Cluster controller follows the steps below:
// 1. Populates proxy spec if cluster connection type is proxy
// 1.1 Wait for cluster agent to be ready if connection type is proxy
// 1. Wait for cluster agent to be ready if connection type is proxy
// 2. Join cluster into federation control plane if kubeconfig is ready.
// 3. Pull cluster version and configz, set result to cluster status
// Also put all clusters back into the queue every 5 * time.Minute to sync cluster status; this is needed
// in case there aren't any cluster changes made.
// Also check if all of the clusters are ready by the spec.connection.kubeconfig every resync period

const (
	// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
@@ -83,17 +82,14 @@ const (
	portRangeMin = 6000
	portRangeMax = 7000

	// Proxy service port
	kubernetesPort = 6443
	kubespherePort = 80

	defaultAgentNamespace = "kubesphere-system"

	// proxy format
	proxyFormat = "%s/api/v1/namespaces/kubesphere-system/services/:ks-apiserver:80/proxy/%s"

	// multicluster configuration name
	configzMultiCluster = "multicluster"

	// probe cluster timeout
	probeClusterTimeout = 3 * time.Second
)

// Cluster template for reconcile host cluster if there is none.
@@ -223,12 +219,16 @@ func (c *clusterController) Run(workers int, stopCh <-chan struct{}) error {
		go wait.Until(c.worker, c.workerLoopPeriod, stopCh)
	}

	// refresh cluster configz every 2 minutes
	// refresh cluster configz every resync period
	go wait.Until(func() {
		if err := c.reconcileHostCluster(); err != nil {
			klog.Errorf("Error create host cluster, error %v", err)
		}

		if err := c.probeClusters(); err != nil {
			klog.Errorf("failed to reconcile cluster ready status, err: %v", err)
		}

	}, c.resyncPeriod, stopCh)

	<-stopCh
@@ -348,6 +348,80 @@ func (c *clusterController) reconcileHostCluster() error {
	return err
}

func (c *clusterController) probeClusters() error {
	clusters, err := c.clusterLister.List(labels.Everything())
	if err != nil {
		return err
	}

	for _, cluster := range clusters {
		if len(cluster.Spec.Connection.KubeConfig) == 0 {
			continue
		}

		clientConfig, err := clientcmd.NewClientConfigFromBytes(cluster.Spec.Connection.KubeConfig)
		if err != nil {
			klog.Error(err)
			continue
		}

		config, err := clientConfig.ClientConfig()
		if err != nil {
			klog.Error(err)
			continue
		}
		config.Timeout = probeClusterTimeout

		clientSet, err := kubernetes.NewForConfig(config)
		if err != nil {
			klog.Error(err)
			continue
		}

		var con clusterv1alpha1.ClusterCondition
		_, err = clientSet.Discovery().ServerVersion()
		if err == nil {
			con = clusterv1alpha1.ClusterCondition{
				Type:               clusterv1alpha1.ClusterReady,
				Status:             v1.ConditionTrue,
				LastUpdateTime:     metav1.Now(),
				LastTransitionTime: metav1.Now(),
				Reason:             string(clusterv1alpha1.ClusterReady),
				Message:            "Cluster is available now",
			}
		} else {
			con = clusterv1alpha1.ClusterCondition{
				Type:               clusterv1alpha1.ClusterReady,
				Status:             v1.ConditionFalse,
				LastUpdateTime:     metav1.Now(),
				LastTransitionTime: metav1.Now(),
				Reason:             "failed to connect get kubernetes version",
				Message:            "Cluster is not available now",
			}
		}

		c.updateClusterCondition(cluster, con)
		err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			ct, err := c.clusterClient.Get(context.TODO(), cluster.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}

			ct.Status.Conditions = cluster.Status.Conditions
			ct, err = c.clusterClient.Update(context.TODO(), ct, metav1.UpdateOptions{})
			return err
		})
		if err != nil {
			klog.Errorf("failed to update cluster %s status, err: %v", cluster.Name, err)
		} else {
			klog.V(4).Infof("successfully updated cluster %s to status %v", cluster.Name, con)
		}

	}

	return nil
}

func (c *clusterController) syncCluster(key string) error {
	klog.V(5).Infof("starting to sync cluster %s", key)
	startTime := time.Now()
@@ -363,6 +437,7 @@ func (c *clusterController) syncCluster(key string) error {
	}()

	cluster, err := c.clusterLister.Get(name)

	if err != nil {
		// cluster not found, possibly been deleted
		// need to do the cleanup
@@ -374,9 +449,6 @@ func (c *clusterController) syncCluster(key string) error {
		return err
	}

	// proxy service name if needed
	serviceName := fmt.Sprintf("mc-%s", cluster.Name)

	if cluster.ObjectMeta.DeletionTimestamp.IsZero() {
		// The object is not being deleted, so if it does not have our finalizer,
		// then lets add the finalizer and update the object. This is equivalent
@@ -399,22 +471,6 @@ func (c *clusterController) syncCluster(key string) error {
			return err
		}

		_, err = c.client.CoreV1().Services(defaultAgentNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
		if err != nil {
			if errors.IsNotFound(err) {
				// nothing to do
			} else {
				klog.Errorf("Failed to get proxy service %s, error %v", serviceName, err)
				return err
			}
		} else {
			err = c.client.CoreV1().Services(defaultAgentNamespace).Delete(context.TODO(), serviceName, *metav1.NewDeleteOptions(0))
			if err != nil {
				klog.Errorf("Unable to delete service %s, error %v", serviceName, err)
				return err
			}
		}

		// clean up openpitrix runtime of the cluster
		if _, ok := cluster.Annotations[openpitrixRuntime]; ok {
			if c.openpitrixClient != nil {
@@ -438,136 +494,18 @@ func (c *clusterController) syncCluster(key string) error {
		return nil
	}

	// save an old copy of cluster
	oldCluster := cluster.DeepCopy()

	// currently we didn't set cluster.Spec.Enable when creating cluster at client side, so only check
	// if we enable cluster.Spec.JoinFederation now
	if cluster.Spec.JoinFederation == false {
		return nil
	}

	// save an old copy of cluster
	oldCluster := cluster.DeepCopy()

	// prepare for proxy to member cluster
	if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {

		// allocate ports for kubernetes and kubesphere endpoint
		if cluster.Spec.Connection.KubeSphereAPIServerPort == 0 ||
			cluster.Spec.Connection.KubernetesAPIServerPort == 0 {
			port, err := c.allocatePort()
			if err != nil {
				klog.Error(err)
				return err
			}

			cluster.Spec.Connection.KubernetesAPIServerPort = port
			cluster.Spec.Connection.KubeSphereAPIServerPort = port + 10000
		}

		// token uninitialized, generate a new token
		if len(cluster.Spec.Connection.Token) == 0 {
			cluster.Spec.Connection.Token = c.generateToken()
		}

		// create a proxy service spec
		mcService := v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      serviceName,
				Namespace: cluster.Namespace,
				Labels: map[string]string{
					"app.kubernetes.io/name": serviceName,
					"app":                    serviceName,
				},
			},
			Spec: v1.ServiceSpec{
				Selector: map[string]string{
					"app.kubernetes.io/name": "tower",
					"app":                    "tower",
				},
				Ports: []v1.ServicePort{
					{
						Name:       "kubernetes",
						Protocol:   v1.ProtocolTCP,
						Port:       kubernetesPort,
						TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubernetesAPIServerPort)),
					},
					{
						Name:       "kubesphere",
						Protocol:   v1.ProtocolTCP,
						Port:       kubespherePort,
						TargetPort: intstr.FromInt(int(cluster.Spec.Connection.KubeSphereAPIServerPort)),
					},
				},
			},
		}

		service, err := c.client.CoreV1().Services(defaultAgentNamespace).Get(context.TODO(), serviceName, metav1.GetOptions{})
		if err != nil { // proxy service not found
			if errors.IsNotFound(err) {
				service, err = c.client.CoreV1().Services(defaultAgentNamespace).Create(context.TODO(), &mcService, metav1.CreateOptions{})
				if err != nil {
					return err
				}
			}

			return err
		} else { // update existing proxy service
			if !reflect.DeepEqual(service.Spec, mcService.Spec) {
				mcService.ObjectMeta = service.ObjectMeta
				mcService.Spec.ClusterIP = service.Spec.ClusterIP

				service, err = c.client.CoreV1().Services(defaultAgentNamespace).Update(context.TODO(), &mcService, metav1.UpdateOptions{})
				if err != nil {
					return err
				}
			}
		}

		// populate the kubernetes apiEndpoint and kubesphere apiEndpoint
		cluster.Spec.Connection.KubernetesAPIEndpoint = fmt.Sprintf("https://%s:%d", service.Spec.ClusterIP, kubernetesPort)
		cluster.Spec.Connection.KubeSphereAPIEndpoint = fmt.Sprintf("http://%s:%d", service.Spec.ClusterIP, kubespherePort)

		initializedCondition := clusterv1alpha1.ClusterCondition{
			Type:               clusterv1alpha1.ClusterInitialized,
			Status:             v1.ConditionTrue,
			Reason:             string(clusterv1alpha1.ClusterInitialized),
			Message:            "Cluster has been initialized",
			LastUpdateTime:     metav1.Now(),
			LastTransitionTime: metav1.Now(),
		}

		if !isConditionTrue(cluster, clusterv1alpha1.ClusterInitialized) {
			c.updateClusterCondition(cluster, initializedCondition)
		}

		if !reflect.DeepEqual(oldCluster, cluster) {
			cluster, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
			if err != nil {
				klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
				return err
			}
		}
	}

	// agent status unavailable, which means the agent disconnected from the server or has not connected to the server;
	// we need to update the cluster ready status to unavailable and return.
	if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy &&
		!isConditionTrue(cluster, clusterv1alpha1.ClusterAgentAvailable) {
		clusterNotReadyCondition := clusterv1alpha1.ClusterCondition{
			Type:               clusterv1alpha1.ClusterReady,
			Status:             v1.ConditionFalse,
			LastUpdateTime:     metav1.Now(),
			LastTransitionTime: metav1.Now(),
			Reason:             "Unable to establish connection with cluster",
			Message:            "Cluster is not available now",
		}

		c.updateClusterCondition(cluster, clusterNotReadyCondition)

		cluster, err = c.clusterClient.Update(context.TODO(), cluster, metav1.UpdateOptions{})
		if err != nil {
			klog.Errorf("Error updating cluster %s, error %s", cluster.Name, err)
		}
		return err
	// cluster not ready, nothing to do
	if !isConditionTrue(cluster, clusterv1alpha1.ClusterReady) {
		return nil
	}

	// build up cached cluster data if there isn't any
@@ -594,10 +532,10 @@ func (c *clusterController) syncCluster(key string) error {
	_, err = c.joinFederation(clusterDt.config, cluster.Name, cluster.Labels)
	if err != nil {
		klog.Errorf("Failed to join federation for cluster %s, error %v", cluster.Name, err)
		c.eventRecorder.Event(cluster, v1.EventTypeWarning, "JoinFederation", err.Error())
		return err
	}
	c.eventRecorder.Event(cluster, v1.EventTypeNormal, "JoinFederation", "Cluster has joined federation.")

	klog.Infof("successfully joined federation for cluster %s", cluster.Name)

	federationReadyCondition := clusterv1alpha1.ClusterCondition{
		Type: clusterv1alpha1.ClusterFederated,
@@ -611,7 +549,7 @@ func (c *clusterController) syncCluster(key string) error {
		c.updateClusterCondition(cluster, federationReadyCondition)
	}

	// cluster agent is ready, we can pull kubernetes cluster info through agent
	// cluster is ready, we can pull kubernetes cluster info through agent;
	// since there is no agent necessary for the host cluster, updates for the host cluster
	// are safe.
	if len(cluster.Spec.Connection.KubernetesAPIEndpoint) == 0 {
@@ -647,17 +585,6 @@ func (c *clusterController) syncCluster(key string) error {
		cluster.Labels[clusterv1alpha1.HostCluster] = ""
	}

	clusterReadyCondition := clusterv1alpha1.ClusterCondition{
		Type:               clusterv1alpha1.ClusterReady,
		Status:             v1.ConditionTrue,
		LastUpdateTime:     metav1.Now(),
		LastTransitionTime: metav1.Now(),
		Reason:             string(clusterv1alpha1.ClusterReady),
		Message:            "Cluster is available now",
	}

	c.updateClusterCondition(cluster, clusterReadyCondition)

	if c.openpitrixClient != nil { // OpenPitrix is enabled, create runtime
		if cluster.GetAnnotations() == nil {
			cluster.Annotations = make(map[string]string)
@@ -746,16 +673,6 @@ func (c *clusterController) addCluster(obj interface{}) {
	c.queue.Add(key)
}

func hasHostClusterLabel(cluster *clusterv1alpha1.Cluster) bool {
	if cluster.Labels == nil || len(cluster.Labels) == 0 {
		return false
	}

	_, ok := cluster.Labels[clusterv1alpha1.HostCluster]
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
func (c *clusterController) handleErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
c.queue.Forget(key)
|
||||
@@ -855,43 +772,3 @@ func (c *clusterController) unJoinFederation(clusterConfig *rest.Config, unjoini
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// allocatePort find a available port between [portRangeMin, portRangeMax] in maximumRetries
|
||||
// TODO: only works with handful clusters
|
||||
func (c *clusterController) allocatePort() (uint16, error) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
clusters, err := c.clusterLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
const maximumRetries = 10
|
||||
for i := 0; i < maximumRetries; i++ {
|
||||
collision := false
|
||||
port := uint16(portRangeMin + rand.Intn(portRangeMax-portRangeMin+1))
|
||||
|
||||
for _, item := range clusters {
|
||||
if item.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy &&
|
||||
item.Spec.Connection.KubernetesAPIServerPort != 0 &&
|
||||
item.Spec.Connection.KubeSphereAPIServerPort == port {
|
||||
collision = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !collision {
|
||||
return port, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("unable to allocate port after %d retries", maximumRetries)
|
||||
}
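
// Editorial note (not part of the original change): each retry above draws one
// candidate uniformly from R = portRangeMax-portRangeMin+1 ports, and at most
// N ports are taken when N proxy clusters exist, so a single draw collides with
// probability at most N/R and all 10 retries fail with probability roughly
// (N/R)^10, which is negligible for the "handful" of clusters the TODO
// mentions but worth revisiting for large fleets.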

// generateToken returns 32 random bytes, hex-encoded, as the token
func (c *clusterController) generateToken() string {
    rand.Seed(time.Now().UnixNano())
    b := make([]byte, 32)
    rand.Read(b)
    return fmt.Sprintf("%x", b)
}
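
// Editorial sketch (hypothetical, not part of the original change): math/rand
// seeded with the wall clock is predictable, so a token used to authenticate
// member-cluster agents would ideally come from crypto/rand instead. A minimal
// alternative, assuming the caller can handle an error:
//
//	import (
//	    cryptorand "crypto/rand"
//	    "encoding/hex"
//	)
//
//	func generateSecureToken() (string, error) {
//	    b := make([]byte, 32)
//	    if _, err := cryptorand.Read(b); err != nil {
//	        return "", err
//	    }
//	    return hex.EncodeToString(b), nil // 64 hex chars for 32 random bytes
//	}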

@@ -19,6 +19,7 @@ package devopscredential
import (
    "context"
    "fmt"
    "github.com/emicklei/go-restful"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,10 +37,12 @@ import (
    devopsv1alpha3 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha3"
    kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/controller/utils"
    modelsdevops "kubesphere.io/kubesphere/pkg/models/devops"
    devopsClient "kubesphere.io/kubesphere/pkg/simple/client/devops"
    "kubesphere.io/kubesphere/pkg/utils/k8sutil"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "net/http"
    "reflect"
    "strings"
    "time"
@@ -218,7 +221,7 @@ func (c *Controller) syncHandler(key string) error {
        return err
    }
    if !isDevOpsProjectAdminNamespace(namespace) {
        err := fmt.Errorf("could not create credential in normal namespaces %s", namespace.Name)
        err := fmt.Errorf("could not create or update credential '%s' in normal namespaces %s", name, namespace.Name)
        klog.Warning(err)
        return err
    }
@@ -233,14 +236,26 @@ func (c *Controller) syncHandler(key string) error {
        return err
    }

    //If the sync is successful, return handle
    if state, ok := secret.Annotations[devopsv1alpha3.CredentialSyncStatusAnnoKey]; ok && state == modelsdevops.StatusSuccessful {
        return nil
    }

    copySecret := secret.DeepCopy()
    // DeletionTimestamp.IsZero() means copySecret has not been deleted.
    if secret.ObjectMeta.DeletionTimestamp.IsZero() {
    if copySecret.ObjectMeta.DeletionTimestamp.IsZero() {
        // make sure Annotations is not nil
        if copySecret.Annotations == nil {
            copySecret.Annotations = map[string]string{}
        }

        //If the sync is successful, return handle
        if state, ok := copySecret.Annotations[devopsv1alpha3.CredentialSyncStatusAnnoKey]; ok && state == modelsdevops.StatusSuccessful {
            specHash := utils.ComputeHash(copySecret.Data)
            oldHash, _ := copySecret.Annotations[devopsv1alpha3.DevOpsCredentialDataHash] // don't need to check if it's nil, only compare if they're different
            if specHash == oldHash {
                // it was synced successfully, and there's no change in the credential data, skip this round
                return nil
            } else {
                copySecret.Annotations[devopsv1alpha3.DevOpsCredentialDataHash] = specHash
            }
        }

        // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers
        if !sliceutil.HasString(secret.ObjectMeta.Finalizers, devopsv1alpha3.CredentialFinalizerName) {
            copySecret.ObjectMeta.Finalizers = append(copySecret.ObjectMeta.Finalizers, devopsv1alpha3.CredentialFinalizerName)
@@ -268,13 +283,31 @@ func (c *Controller) syncHandler(key string) error {
    } else {
        // Finalizers processing logic
        if sliceutil.HasString(copySecret.ObjectMeta.Finalizers, devopsv1alpha3.CredentialFinalizerName) {
            delSuccess := false
            if _, err := c.devopsClient.DeleteCredentialInProject(nsName, secret.Name); err != nil {
                // the status code should be 404 if the credential does not exist
                if srvErr, ok := err.(restful.ServiceError); ok {
                    delSuccess = srvErr.Code == http.StatusNotFound
                } else if srvErr, ok := err.(*devopsClient.ErrorResponse); ok {
                    delSuccess = srvErr.Response.StatusCode == http.StatusNotFound
                } else {
                    klog.Error(fmt.Sprintf("unexpected error type: %v, should be *restful.ServiceError", err))
                }

                klog.V(8).Info(err, fmt.Sprintf("failed to delete secret %s in devops", key))
                return err
            } else {
                delSuccess = true
            }

            if delSuccess {
                copySecret.ObjectMeta.Finalizers = sliceutil.RemoveString(copySecret.ObjectMeta.Finalizers, func(item string) bool {
                    return item == devopsv1alpha3.CredentialFinalizerName
                })
            } else {
                // make sure the corresponding Jenkins credentials can be cleaned up
                // You can remove the finalizer manually via kubectl in the special case that Jenkins is not available anymore
                return fmt.Errorf("failed to remove devops credential finalizer due to bad communication with Jenkins")
            }
            copySecret.ObjectMeta.Finalizers = sliceutil.RemoveString(copySecret.ObjectMeta.Finalizers, func(item string) bool {
                return item == devopsv1alpha3.CredentialFinalizerName
            })

        }
    }

@@ -20,6 +20,8 @@ import (
    "bytes"
    "context"
    "fmt"
    "reflect"

    "github.com/go-logr/logr"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
@@ -37,7 +39,6 @@ import (
    controllerutils "kubesphere.io/kubesphere/pkg/controller/utils/controller"
    "kubesphere.io/kubesphere/pkg/utils/k8sutil"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "reflect"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
@@ -129,19 +130,24 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    return ctrl.Result{}, nil
}

    // initialize subresource if created by kubesphere
    if workspace := namespace.Labels[tenantv1alpha1.WorkspaceLabel]; workspace != "" {
    // Bind to workspace if the namespace was created by kubesphere
    _, hasWorkspaceLabel := namespace.Labels[tenantv1alpha1.WorkspaceLabel]
    if hasWorkspaceLabel {
        if err := r.bindWorkspace(rootCtx, logger, namespace); err != nil {
            return ctrl.Result{}, err
        }
        if err := r.initRoles(rootCtx, logger, namespace); err != nil {
            return ctrl.Result{}, err
        }
    } else {
        if err := r.unbindWorkspace(rootCtx, logger, namespace); err != nil {
            return ctrl.Result{}, err
        }
    }
    // Initialize roles for devops/project namespaces if created by kubesphere
    _, hasDevOpsProjectLabel := namespace.Labels[constants.DevOpsProjectLabelKey]
    if hasDevOpsProjectLabel || hasWorkspaceLabel {
        if err := r.initRoles(rootCtx, logger, namespace); err != nil {
            return ctrl.Result{}, err
        }
    }

    r.Recorder.Event(namespace, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
    return ctrl.Result{}, nil

@@ -19,32 +19,36 @@ package ippool
import (
    "context"
    "fmt"
    "reflect"
    "time"

    cnet "github.com/projectcalico/libcalico-go/lib/net"
    podv1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    k8sinformers "k8s.io/client-go/informers"
    coreinfomers "k8s.io/client-go/informers/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog"
    networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
    tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
    ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
    networkInformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network/v1alpha1"
    tenantv1alpha1informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/controller/network/utils"
    "kubesphere.io/kubesphere/pkg/controller/network/webhooks"
    "kubesphere.io/kubesphere/pkg/simple/client/network/ippool"
    "reflect"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "time"
)

var (
@@ -61,6 +65,13 @@ type IPPoolController struct {
    ippoolSynced cache.InformerSynced
    ippoolQueue  workqueue.RateLimitingInterface

    wsInformer tenantv1alpha1informers.WorkspaceInformer
    wsSynced   cache.InformerSynced

    nsInformer coreinfomers.NamespaceInformer
    nsSynced   cache.InformerSynced
    nsQueue    workqueue.RateLimitingInterface

    ipamblockInformer networkInformer.IPAMBlockInformer
    ipamblockSynced   cache.InformerSynced

@@ -68,31 +79,25 @@ type IPPoolController struct {
    kubesphereClient kubesphereclient.Interface
}

func (c *IPPoolController) ippoolHandle(obj interface{}) {
func (c *IPPoolController) enqueueIPPools(obj interface{}) {
    pool, ok := obj.(*networkv1alpha1.IPPool)
    if !ok {
        utilruntime.HandleError(fmt.Errorf("IPPool informer returned non-ippool object: %#v", obj))
        return
    }
    key, err := cache.MetaNamespaceKeyFunc(pool)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("couldn't get key for ippool %#v: %v", pool, err))
        return
    }

    if utils.NeedToAddFinalizer(pool, networkv1alpha1.IPPoolFinalizer) || utils.IsDeletionCandidate(pool, networkv1alpha1.IPPoolFinalizer) {
        c.ippoolQueue.Add(key)
    }
    c.ippoolQueue.Add(pool.Name)
}

func (c *IPPoolController) addFinalizer(pool *networkv1alpha1.IPPool) error {
    clone := pool.DeepCopy()
    controllerutil.AddFinalizer(clone, networkv1alpha1.IPPoolFinalizer)
    clone.Labels = map[string]string{
        networkv1alpha1.IPPoolNameLabel: clone.Name,
        networkv1alpha1.IPPoolTypeLabel: clone.Spec.Type,
        networkv1alpha1.IPPoolIDLabel:   fmt.Sprintf("%d", clone.ID()),
    if clone.Labels == nil {
        clone.Labels = make(map[string]string)
    }
    clone.Labels[networkv1alpha1.IPPoolNameLabel] = clone.Name
    clone.Labels[networkv1alpha1.IPPoolTypeLabel] = clone.Spec.Type
    clone.Labels[networkv1alpha1.IPPoolIDLabel] = fmt.Sprintf("%d", clone.ID())
    pool, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
    if err != nil {
        klog.V(3).Infof("Error adding finalizer to pool %s: %v", pool.Name, err)
@@ -116,12 +121,15 @@ func (c *IPPoolController) removeFinalizer(pool *networkv1alpha1.IPPool) error {

func (c *IPPoolController) ValidateCreate(obj runtime.Object) error {
    b := obj.(*networkv1alpha1.IPPool)
    _, cidr, err := cnet.ParseCIDR(b.Spec.CIDR)
    ip, cidr, err := cnet.ParseCIDR(b.Spec.CIDR)
    if err != nil {
        return fmt.Errorf("invalid cidr")
    }

    size, _ := cidr.Mask.Size()
    if ip.IP.To4() != nil && size == 32 {
        return fmt.Errorf("the cidr mask must be less than 32")
    }
    if b.Spec.BlockSize > 0 && b.Spec.BlockSize < size {
        return fmt.Errorf("the blocksize should be larger than the cidr mask")
    }
@@ -163,6 +171,25 @@ func (c *IPPoolController) ValidateCreate(obj runtime.Object) error {
    return nil
}

func (c *IPPoolController) validateDefaultIPPool(p *networkv1alpha1.IPPool) error {
    pools, err := c.kubesphereClient.NetworkV1alpha1().IPPools().List(context.TODO(), metav1.ListOptions{
        LabelSelector: labels.SelectorFromSet(
            labels.Set{
                networkv1alpha1.IPPoolDefaultLabel: "",
            }).String(),
    })
    if err != nil {
        return err
    }

    poolLen := len(pools.Items)
    if poolLen != 1 || pools.Items[0].Name != p.Name {
        return nil
    }

    return fmt.Errorf("must ensure that there is at least one default ippool")
}
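
// Editorial note (not part of the original change): validateDefaultIPPool
// lists all pools carrying IPPoolDefaultLabel and only returns an error when p
// is the single remaining default pool; callers use it to reject deleting the
// last default ippool or stripping its default label, so at least one default
// pool always survives.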

func (c *IPPoolController) ValidateUpdate(old runtime.Object, new runtime.Object) error {
    oldP := old.(*networkv1alpha1.IPPool)
    newP := new.(*networkv1alpha1.IPPool)
@@ -183,6 +210,15 @@ func (c *IPPoolController) ValidateUpdate(old runtime.Object, new runtime.Object
        return fmt.Errorf("ippool rangeEnd/rangeStart cannot be modified")
    }

    _, defaultOld := oldP.Labels[networkv1alpha1.IPPoolDefaultLabel]
    _, defaultNew := newP.Labels[networkv1alpha1.IPPoolDefaultLabel]
    if !defaultNew && defaultOld != defaultNew {
        err := c.validateDefaultIPPool(newP)
        if err != nil {
            return err
        }
    }

    return nil
}

@@ -193,7 +229,7 @@ func (c *IPPoolController) ValidateDelete(obj runtime.Object) error {
        return fmt.Errorf("ippool is in use, please remove the workload before deleting")
    }

    return nil
    return c.validateDefaultIPPool(p)
}

func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
@@ -204,7 +240,7 @@ func (c *IPPoolController) disableIPPool(old *networkv1alpha1.IPPool) error {
    clone := old.DeepCopy()
    clone.Spec.Disabled = true

    old, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})
    _, err := c.kubesphereClient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, metav1.UpdateOptions{})

    return err
}
@@ -305,19 +341,20 @@ func (c *IPPoolController) Run(workers int, stopCh <-chan struct{}) error {
    klog.Info("starting ippool controller")
    defer klog.Info("shutting down ippool controller")

    if !cache.WaitForCacheSync(stopCh, c.ippoolSynced, c.ipamblockSynced) {
    if !cache.WaitForCacheSync(stopCh, c.ippoolSynced, c.ipamblockSynced, c.wsSynced, c.nsSynced) {
        return fmt.Errorf("failed to wait for caches to sync")
    }

    for i := 0; i < workers; i++ {
        go wait.Until(c.runWorker, time.Second, stopCh)
        go wait.Until(c.runIPPoolWorker, time.Second, stopCh)
        go wait.Until(c.runNSWorker, time.Second, stopCh)
    }

    <-stopCh
    return nil
}

func (c *IPPoolController) runWorker() {
func (c *IPPoolController) runIPPoolWorker() {
    for c.processIPPoolItem() {
    }
}
@@ -329,26 +366,78 @@ func (c *IPPoolController) processIPPoolItem() bool {
    }
    defer c.ippoolQueue.Done(key)

    _, name, err := cache.SplitMetaNamespaceKey(key.(string))
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("error parsing ippool key %q: %v", key, err))
        return true
    }

    delay, err := c.processIPPool(name)
    delay, err := c.processIPPool(key.(string))
    if err == nil {
        c.ippoolQueue.Forget(key)
        return true
    } else if delay != nil {
        c.ippoolQueue.AddAfter(key, *delay)
    }

    if delay != nil {
        c.ippoolQueue.AddAfter(key, *delay)
    } else {
        c.ippoolQueue.AddRateLimited(key)
    }
    utilruntime.HandleError(fmt.Errorf("error processing ippool %v (will retry): %v", key, err))
    c.ippoolQueue.AddRateLimited(key)
    return true
}

func (c *IPPoolController) ipamblockHandle(obj interface{}) {
func (c *IPPoolController) runNSWorker() {
    for c.processNSItem() {
    }
}

func (c *IPPoolController) processNS(name string) error {
    ns, err := c.nsInformer.Lister().Get(name)
    if apierrors.IsNotFound(err) {
        return nil
    }

    var poolsName []string
    if ns.Labels != nil && ns.Labels[constants.WorkspaceLabelKey] != "" {
        pools, err := c.ippoolInformer.Lister().List(labels.SelectorFromSet(labels.Set{
            networkv1alpha1.IPPoolDefaultLabel: "",
        }))
        if err != nil {
            return err
        }

        for _, pool := range pools {
            poolsName = append(poolsName, pool.Name)
        }
    }

    clone := ns.DeepCopy()
    err = c.provider.UpdateNamespace(clone, poolsName)
    if err != nil {
        return err
    }
    if reflect.DeepEqual(clone, ns) {
        return nil
    }

    _, err = c.client.CoreV1().Namespaces().Update(context.TODO(), clone, metav1.UpdateOptions{})
    return err
}
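
// Editorial note (not part of the original change): processNS keeps a
// namespace in sync with the current set of default ippools. Namespaces bound
// to a workspace are handed the names of every default pool through
// provider.UpdateNamespace (assumed here to rewrite the pool annotation on the
// copy), and the API server is only contacted when that call actually changed
// the object.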

func (c *IPPoolController) processNSItem() bool {
    key, quit := c.nsQueue.Get()
    if quit {
        return false
    }
    defer c.nsQueue.Done(key)

    err := c.processNS(key.(string))
    if err == nil {
        c.nsQueue.Forget(key)
        return true
    }

    c.nsQueue.AddRateLimited(key)
    utilruntime.HandleError(fmt.Errorf("error processing ns %v (will retry): %v", key, err))
    return true
}

func (c *IPPoolController) enqueueIPAMBlocks(obj interface{}) {
    block, ok := obj.(*networkv1alpha1.IPAMBlock)
    if !ok {
        return
@@ -358,9 +447,47 @@ func (c *IPPoolController) ipamblockHandle(obj interface{}) {
    c.ippoolQueue.Add(poolName)
}

func (c *IPPoolController) enqueueWorkspace(obj interface{}) {
    wk, ok := obj.(*tenantv1alpha1.Workspace)
    if !ok {
        return
    }

    pools, err := c.ippoolInformer.Lister().List(labels.SelectorFromSet(labels.Set{
        constants.WorkspaceLabelKey: wk.Name,
    }))
    if err != nil {
        klog.Errorf("failed to list ippools by workspace %s, err=%v", wk.Name, err)
    }

    for _, pool := range pools {
        c.ippoolQueue.Add(pool.Name)
    }
}

func (c *IPPoolController) enqueueNamespace(old interface{}, new interface{}) {
    workspaceOld := ""
    if old != nil {
        nsOld := old.(*corev1.Namespace)
        if nsOld.Labels != nil {
            workspaceOld = nsOld.Labels[constants.WorkspaceLabelKey]
        }
    }

    nsNew := new.(*corev1.Namespace)
    workspaceNew := ""
    if nsNew.Labels != nil {
        workspaceNew = nsNew.Labels[constants.WorkspaceLabelKey]
    }

    if workspaceOld != workspaceNew {
        c.nsQueue.Add(nsNew.Name)
    }
}

func NewIPPoolController(
    ippoolInformer networkInformer.IPPoolInformer,
    ipamblockInformer networkInformer.IPAMBlockInformer,
    kubesphereInformers ksinformers.SharedInformerFactory,
    kubernetesInformers k8sinformers.SharedInformerFactory,
    client clientset.Interface,
    kubesphereClient kubesphereclient.Interface,
    provider ippool.Provider) *IPPoolController {
@@ -369,43 +496,71 @@ func NewIPPoolController(
    broadcaster.StartLogging(func(format string, args ...interface{}) {
        klog.Info(fmt.Sprintf(format, args...))
    })
    broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ippool-controller"})
    broadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
    recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "ippool-controller"})

    c := &IPPoolController{
        eventBroadcaster:  broadcaster,
        eventRecorder:     recorder,
        ippoolInformer:    ippoolInformer,
        ippoolSynced:      ippoolInformer.Informer().HasSynced,
        ippoolQueue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool"),
        ipamblockInformer: ipamblockInformer,
        ipamblockSynced:   ipamblockInformer.Informer().HasSynced,
        client:            client,
        kubesphereClient:  kubesphereClient,
        provider:          provider,
        eventBroadcaster: broadcaster,
        eventRecorder:    recorder,
        ippoolQueue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool"),
        nsQueue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ippool-ns"),
        client:           client,
        kubesphereClient: kubesphereClient,
        provider:         provider,
    }
    c.ippoolInformer = kubesphereInformers.Network().V1alpha1().IPPools()
    c.ippoolSynced = c.ippoolInformer.Informer().HasSynced
    c.ipamblockInformer = kubesphereInformers.Network().V1alpha1().IPAMBlocks()
    c.ipamblockSynced = c.ipamblockInformer.Informer().HasSynced
    c.wsInformer = kubesphereInformers.Tenant().V1alpha1().Workspaces()
    c.wsSynced = c.wsInformer.Informer().HasSynced
    c.nsInformer = kubernetesInformers.Core().V1().Namespaces()
    c.nsSynced = c.nsInformer.Informer().HasSynced

    ippoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: c.ippoolHandle,
    c.ippoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: c.enqueueIPPools,
        UpdateFunc: func(old, new interface{}) {
            c.ippoolHandle(new)
            _, defaultOld := old.(*networkv1alpha1.IPPool).Labels[networkv1alpha1.IPPoolDefaultLabel]
            _, defaultNew := new.(*networkv1alpha1.IPPool).Labels[networkv1alpha1.IPPoolDefaultLabel]
            if defaultOld != defaultNew {
                nss, err := c.nsInformer.Lister().List(labels.Everything())
                if err != nil {
                    return
                }

                for _, ns := range nss {
                    c.enqueueNamespace(nil, ns)
                }
            }
            c.enqueueIPPools(new)
        },
    })

    // just for updating ippool status
    ipamblockInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: c.ipamblockHandle,
    c.ipamblockInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: c.enqueueIPAMBlocks,
        UpdateFunc: func(old, new interface{}) {
            c.ipamblockHandle(new)
            c.enqueueIPAMBlocks(new)
        },
        DeleteFunc: c.ipamblockHandle,
        DeleteFunc: c.enqueueIPAMBlocks,
    })

    c.wsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: c.enqueueWorkspace,
    })

    c.nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(new interface{}) {
            c.enqueueNamespace(nil, new)
        },
        UpdateFunc: c.enqueueNamespace,
    })

    // register ippool webhook
    webhooks.RegisterValidator(networkv1alpha1.SchemeGroupVersion.WithKind(networkv1alpha1.ResourceKindIPPool).String(),
        &webhooks.ValidatorWrap{Obj: &networkv1alpha1.IPPool{}, Helper: c})
    webhooks.RegisterDefaulter(podv1.SchemeGroupVersion.WithKind("Pod").String(),
        &webhooks.DefaulterWrap{Obj: &podv1.Pod{}, Helper: provider})
    webhooks.RegisterDefaulter(corev1.SchemeGroupVersion.WithKind("Pod").String(),
        &webhooks.DefaulterWrap{Obj: &corev1.Pod{}, Helper: provider})

    return c
}

@@ -25,6 +25,7 @@ import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8sinformers "k8s.io/client-go/informers"
    k8sfake "k8s.io/client-go/kubernetes/fake"
    "k8s.io/klog"
    "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
@@ -45,6 +46,10 @@ func TestIPPoolSuit(t *testing.T) {
    RunSpecs(t, "IPPool Suite")
}

var (
    alwaysReady = func() bool { return true }
)

var _ = Describe("test ippool", func() {
    pool := &v1alpha1.IPPool{
        TypeMeta: v1.TypeMeta{},
@@ -60,16 +65,16 @@ var _ = Describe("test ippool", func() {

    ksclient := ksfake.NewSimpleClientset()
    k8sclinet := k8sfake.NewSimpleClientset()
    p := ippool.NewProvider(nil, ksclient, k8sclinet, v1alpha1.IPPoolTypeLocal, nil)
    ipamClient := ipam.NewIPAMClient(ksclient, v1alpha1.VLAN)

    ksInformer := ksinformers.NewSharedInformerFactory(ksclient, 0)
    ippoolInformer := ksInformer.Network().V1alpha1().IPPools()
    ipamblockInformer := ksInformer.Network().V1alpha1().IPAMBlocks()
    c := NewIPPoolController(ippoolInformer, ipamblockInformer, k8sclinet, ksclient, p)
    k8sInformer := k8sinformers.NewSharedInformerFactory(k8sclinet, 0)

    p := ippool.NewProvider(k8sInformer, ksclient, k8sclinet, v1alpha1.IPPoolTypeLocal, nil)
    ipamClient := ipam.NewIPAMClient(ksclient, v1alpha1.VLAN)
    c := NewIPPoolController(ksInformer, k8sInformer, k8sclinet, ksclient, p)

    stopCh := make(chan struct{})
    go ksInformer.Start(stopCh)
    go k8sInformer.Start(stopCh)
    go c.Start(stopCh)

    It("test create ippool", func() {

@@ -19,14 +19,10 @@ package pipeline
import (
    "context"
    "fmt"
    "github.com/davecgh/go-spew/spew"
    "github.com/emicklei/go-restful"
    "hash"
    "hash/fnv"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    corev1informer "k8s.io/client-go/informers/core/v1"
@@ -43,6 +39,7 @@ import (
    devopsinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/devops/v1alpha3"
    devopslisters "kubesphere.io/kubesphere/pkg/client/listers/devops/v1alpha3"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/controller/utils"
    modelsdevops "kubesphere.io/kubesphere/pkg/models/devops"
    devopsClient "kubesphere.io/kubesphere/pkg/simple/client/devops"
    "kubesphere.io/kubesphere/pkg/utils/k8sutil"
@@ -240,7 +237,7 @@ func (c *Controller) syncHandler(key string) error {

    //If the sync is successful, return handle
    if state, ok := copyPipeline.Annotations[devopsv1alpha3.PipelineSyncStatusAnnoKey]; ok && state == modelsdevops.StatusSuccessful {
        specHash := computeHash(copyPipeline.Spec)
        specHash := utils.ComputeHash(copyPipeline.Spec)
        oldHash, _ := copyPipeline.Annotations[devopsv1alpha3.PipelineSpecHash] // don't need to check if it's nil, only compare if they're different
        if specHash == oldHash {
            // it was synced successfully, and there's no change in the Pipeline spec, skip this round
@@ -319,30 +316,6 @@ func (c *Controller) syncHandler(key string) error {
    return nil
}

func computeHash(obj interface{}) string {
    hasher := fnv.New32a()
    deepHashObject(hasher, obj)
    return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}

// deepHashObject writes specified object to hash using the spew library
// which follows pointers and prints actual values of the nested objects
// ensuring the hash does not change when a pointer changes.
// **Notice**
// we don't want to import k8s.io/kubernetes as a module, but this is a very small function
// so just copy it from k8s.io/kubernetes@v1.14.0/pkg/util/hash/hash.go
// **Notice End**
func deepHashObject(hasher hash.Hash, objectToWrite interface{}) {
    hasher.Reset()
    printer := spew.ConfigState{
        Indent:         " ",
        SortKeys:       true,
        DisableMethods: true,
        SpewKeys:       true,
    }
    printer.Fprintf(hasher, "%#v", objectToWrite)
}
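
// Editorial sketch (hypothetical, not part of the original change): the spew
// settings above make the hash depend on field values rather than addresses,
// so two deep-equal specs hash identically. Assuming utils.ComputeHash wraps
// the same fnv/spew logic as the removed computeHash:
//
//	a := pipeline.Spec
//	b := *pipeline.Spec.DeepCopy()
//	utils.ComputeHash(a) == utils.ComputeHash(b) // true: values match, pointers differ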

func isDevOpsProjectAdminNamespace(namespace *v1.Namespace) bool {
    _, ok := namespace.Labels[constants.DevOpsProjectLabelKey]

206
pkg/controller/quota/accessor.go
Normal file
@@ -0,0 +1,206 @@
/*

Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

package quota

import (
    "context"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/klog"
    quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "time"

    lru "github.com/hashicorp/golang-lru"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    utilwait "k8s.io/apimachinery/pkg/util/wait"
    etcd "k8s.io/apiserver/pkg/storage/etcd3"
    utilquota "kubesphere.io/kubesphere/kube/pkg/quota/v1"
)

// Following code copied from github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota

type accessor struct {
    client client.Client

    // updatedResourceQuotas holds a cache of quotas that we've updated. This is used to pull the "really latest" during back to
    // back quota evaluations that touch the same quota doc. This only works because we can compare etcd resourceVersions
    // for the same resource as integers. Before this change: 22 updates with 12 conflicts. after this change: 15 updates with 0 conflicts
    updatedResourceQuotas *lru.Cache
}

// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects.
func newQuotaAccessor(client client.Client) *accessor {
    updatedCache, err := lru.New(100)
    if err != nil {
        // this should never happen
        panic(err)
    }

    return &accessor{
        client:                client,
        updatedResourceQuotas: updatedCache,
    }
}

// UpdateQuotaStatus the newQuota coming in will be incremented from the original. The difference between the original
// and the new is the amount to add to the namespace total, but the total status is the used value itself
func (a *accessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error {
    // skipping namespaced resource quota
    if newQuota.APIVersion != quotav1alpha2.SchemeGroupVersion.String() {
        klog.V(6).Infof("skipping namespaced resource quota %v %v", newQuota.Namespace, newQuota.Name)
        return nil
    }
    ctx := context.TODO()
    resourceQuota := &quotav1alpha2.ResourceQuota{}
    err := a.client.Get(ctx, types.NamespacedName{Name: newQuota.Name}, resourceQuota)
    if err != nil {
        klog.Errorf("failed to fetch resource quota: %s, %v", newQuota.Name, err)
        return err
    }
    resourceQuota = a.checkCache(resourceQuota)

    // re-assign objectmeta
    // make a copy
    updatedQuota := resourceQuota.DeepCopy()
    updatedQuota.ObjectMeta = newQuota.ObjectMeta
    updatedQuota.Namespace = ""

    // determine change in usage
    usageDiff := utilquota.Subtract(newQuota.Status.Used, updatedQuota.Status.Total.Used)

    // update aggregate usage
    updatedQuota.Status.Total.Used = newQuota.Status.Used

    // update per namespace totals
    oldNamespaceTotals, _ := getResourceQuotasStatusByNamespace(updatedQuota.Status.Namespaces, newQuota.Namespace)
    namespaceTotalCopy := oldNamespaceTotals.DeepCopy()
    newNamespaceTotals := *namespaceTotalCopy
    newNamespaceTotals.Used = utilquota.Add(oldNamespaceTotals.Used, usageDiff)
    insertResourceQuotasStatus(&updatedQuota.Status.Namespaces, quotav1alpha2.ResourceQuotaStatusByNamespace{
        Namespace:           newQuota.Namespace,
        ResourceQuotaStatus: newNamespaceTotals,
    })

    klog.V(6).Infof("update resource quota: %+v", updatedQuota)
    err = a.client.Status().Update(ctx, updatedQuota, &client.UpdateOptions{})
    if err != nil {
        klog.Errorf("failed to update resource quota: %v", err)
        return err
    }

    a.updatedResourceQuotas.Add(resourceQuota.Name, updatedQuota)
    return nil
}

var etcdVersioner = etcd.APIObjectVersioner{}

// checkCache compares the passed quota against the value in the look-aside cache and returns the newer
// if the cache is out of date, it deletes the stale entry. This only works because of etcd resourceVersions
// being monotonically increasing integers
func (a *accessor) checkCache(resourceQuota *quotav1alpha2.ResourceQuota) *quotav1alpha2.ResourceQuota {
    uncastCachedQuota, ok := a.updatedResourceQuotas.Get(resourceQuota.Name)
    if !ok {
        return resourceQuota
    }
    cachedQuota := uncastCachedQuota.(*quotav1alpha2.ResourceQuota)

    if etcdVersioner.CompareResourceVersion(resourceQuota, cachedQuota) >= 0 {
        a.updatedResourceQuotas.Remove(resourceQuota.Name)
        return resourceQuota
    }
    return cachedQuota
}
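
// Editorial note (not part of the original change): CompareResourceVersion can
// order two objects only because etcd resourceVersions are decimal integers
// that increase monotonically; against a store with opaque resourceVersions
// this look-aside cache would have no safe notion of "newer".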

func (a *accessor) GetQuotas(namespaceName string) ([]corev1.ResourceQuota, error) {
    resourceQuotaNames, err := a.waitForReadyResourceQuotaNames(namespaceName)
    if err != nil {
        klog.Errorf("failed to fetch resource quota names: %v, %v", namespaceName, err)
        return nil, err
    }
    var result []corev1.ResourceQuota
    for _, resourceQuotaName := range resourceQuotaNames {
        resourceQuota := &quotav1alpha2.ResourceQuota{}
        err = a.client.Get(context.TODO(), types.NamespacedName{Name: resourceQuotaName}, resourceQuota)
        if err != nil {
            klog.Errorf("failed to fetch resource quota %s: %v", resourceQuotaName, err)
            return result, err
        }
        resourceQuota = a.checkCache(resourceQuota)

        // now convert to a ResourceQuota
        convertedQuota := corev1.ResourceQuota{}
        convertedQuota.APIVersion = quotav1alpha2.SchemeGroupVersion.String()
        convertedQuota.ObjectMeta = resourceQuota.ObjectMeta
        convertedQuota.Namespace = namespaceName
        convertedQuota.Spec = resourceQuota.Spec.Quota
        convertedQuota.Status = resourceQuota.Status.Total
        result = append(result, convertedQuota)
    }

    // avoid conflicts with namespaced resource quota
    namespacedResourceQuotas, err := a.waitForReadyNamespacedResourceQuotas(namespaceName)
    if err != nil {
        klog.Errorf("failed to fetch namespaced resource quotas: %v, %v", namespaceName, err)
        return nil, err
    }
    for _, resourceQuota := range namespacedResourceQuotas {
        resourceQuota.APIVersion = corev1.SchemeGroupVersion.String()
        result = append(result, resourceQuota)
    }
    return result, nil
}

func (a *accessor) waitForReadyResourceQuotaNames(namespaceName string) ([]string, error) {
    ctx := context.TODO()
    var resourceQuotaNames []string
    var err error
    // wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds.
    err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
        resourceQuotaNames, err = resourceQuotaNamesFor(ctx, a.client, namespaceName)
        // if we can't find the namespace yet, just wait for the cache to update. Requests to non-existent namespaces
        // may hang, but those people are doing something wrong and namespace lifecycle should reject them.
        if apierrors.IsNotFound(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        return true, nil
    })
    return resourceQuotaNames, err
}

func (a *accessor) waitForReadyNamespacedResourceQuotas(namespaceName string) ([]corev1.ResourceQuota, error) {
    ctx := context.TODO()
    var resourceQuotas []corev1.ResourceQuota
    var err error
    // wait for a valid mapping cache. The overall response can be delayed for up to 10 seconds.
    err = utilwait.PollImmediate(100*time.Millisecond, 8*time.Second, func() (done bool, err error) {
        resourceQuotaList := &corev1.ResourceQuotaList{}
        err = a.client.List(ctx, resourceQuotaList, &client.ListOptions{Namespace: namespaceName})
        if err != nil {
            return false, err
        }
        resourceQuotas = resourceQuotaList.Items
        return true, nil
    })
    return resourceQuotas, err
}
59
pkg/controller/quota/lockfactory.go
Normal file
@@ -0,0 +1,59 @@
/*

Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

package quota

import (
    "sync"
)

// Following code copied from github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota
type LockFactory interface {
    GetLock(string) sync.Locker
}

type DefaultLockFactory struct {
    lock sync.RWMutex

    locks map[string]sync.Locker
}

func NewDefaultLockFactory() *DefaultLockFactory {
    return &DefaultLockFactory{locks: map[string]sync.Locker{}}
}

func (f *DefaultLockFactory) GetLock(key string) sync.Locker {
    lock, exists := f.getExistingLock(key)
    if exists {
        return lock
    }

    f.lock.Lock()
    defer f.lock.Unlock()
    // re-check under the write lock so two racing callers cannot each
    // install a different mutex for the same key
    if lock, exists = f.locks[key]; exists {
        return lock
    }
    lock = &sync.Mutex{}
    f.locks[key] = lock
    return lock
}

func (f *DefaultLockFactory) getExistingLock(key string) (sync.Locker, bool) {
    f.lock.RLock()
    defer f.lock.RUnlock()

    lock, exists := f.locks[key]
    return lock, exists
}
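
// Editorial usage sketch (hypothetical, not part of the original change): the
// factory hands out one mutex per key, letting callers serialize work on a
// single quota without one global lock:
//
//	factory := NewDefaultLockFactory()
//	lock := factory.GetLock(quotaName)
//	lock.Lock()
//	defer lock.Unlock()
//	// read-modify-write the quota status for quotaName here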
299
pkg/controller/quota/resourcequota_controller.go
Normal file
@@ -0,0 +1,299 @@
/*

Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*/

package quota

import (
    "context"
    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog"
    evaluatorcore "kubesphere.io/kubesphere/kube/pkg/quota/v1/evaluator/core"
    "kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
    "kubesphere.io/kubesphere/kube/pkg/quota/v1/install"
    quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
    tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
    "math"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    "sigs.k8s.io/controller-runtime/pkg/source"
    "time"

    k8sinformers "k8s.io/client-go/informers"
    "sigs.k8s.io/controller-runtime/pkg/client"

    corev1 "k8s.io/api/core/v1"

    quotav1 "kubesphere.io/kubesphere/kube/pkg/quota/v1"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/controller"
)

const (
    ControllerName                 = "resourcequota-controller"
    DefaultResyncPeriod            = 5 * time.Minute
    DefaultMaxConcurrentReconciles = 8
)

// Reconciler reconciles a ResourceQuota object
type Reconciler struct {
    client.Client
    logger                  logr.Logger
    recorder                record.EventRecorder
    maxConcurrentReconciles int
    // Knows how to calculate usage
    registry quotav1.Registry
    // Controls full recalculation of quota usage
    resyncPeriod time.Duration
    scheme       *runtime.Scheme
}

func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int, resyncPeriod time.Duration, informerFactory k8sinformers.SharedInformerFactory) error {
    r.logger = ctrl.Log.WithName("controllers").WithName(ControllerName)
    r.recorder = mgr.GetEventRecorderFor(ControllerName)
    r.scheme = mgr.GetScheme()
    r.registry = generic.NewRegistry(install.NewQuotaConfigurationForControllers(generic.ListerFuncForResourceFunc(informerFactory.ForResource)).Evaluators())
    if r.Client == nil {
        r.Client = mgr.GetClient()
    }
    if maxConcurrentReconciles > 0 {
        r.maxConcurrentReconciles = maxConcurrentReconciles
    } else {
        r.maxConcurrentReconciles = DefaultMaxConcurrentReconciles
    }
    r.resyncPeriod = time.Duration(math.Max(float64(resyncPeriod), float64(DefaultResyncPeriod)))
    c, err := ctrl.NewControllerManagedBy(mgr).
        Named(ControllerName).
        WithOptions(controller.Options{
            MaxConcurrentReconciles: r.maxConcurrentReconciles,
        }).
        For(&quotav1alpha2.ResourceQuota{}).
        WithEventFilter(predicate.GenerationChangedPredicate{
            Funcs: predicate.Funcs{
                UpdateFunc: func(e event.UpdateEvent) bool {
                    oldQuota := e.ObjectOld.(*quotav1alpha2.ResourceQuota)
                    newQuota := e.ObjectNew.(*quotav1alpha2.ResourceQuota)
                    return !equality.Semantic.DeepEqual(oldQuota.Spec, newQuota.Spec)
                },
            },
        }).
        Build(r)
    if err != nil {
        return err
    }

    resources := []runtime.Object{
        &corev1.Pod{},
        &corev1.Service{},
        &corev1.PersistentVolumeClaim{},
    }
    realClock := clock.RealClock{}
    for _, resource := range resources {
        err := c.Watch(
            &source.Kind{Type: resource},
            &handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.mapper)},
            predicate.Funcs{
                GenericFunc: func(e event.GenericEvent) bool {
                    return false
                },
                CreateFunc: func(e event.CreateEvent) bool {
                    return false
                },
                UpdateFunc: func(e event.UpdateEvent) bool {
                    notifyChange := false
                    // we only want to queue the updates we care about, as too much noise will overwhelm the queue.
                    switch e.MetaOld.(type) {
                    case *corev1.Pod:
                        oldPod := e.ObjectOld.(*corev1.Pod)
                        newPod := e.ObjectNew.(*corev1.Pod)
                        notifyChange = evaluatorcore.QuotaV1Pod(oldPod, realClock) && !evaluatorcore.QuotaV1Pod(newPod, realClock)
                    case *corev1.Service:
                        oldService := e.ObjectOld.(*corev1.Service)
                        newService := e.ObjectNew.(*corev1.Service)
                        notifyChange = evaluatorcore.GetQuotaServiceType(oldService) != evaluatorcore.GetQuotaServiceType(newService)
                    case *corev1.PersistentVolumeClaim:
                        notifyChange = true
                    }
                    return notifyChange
                },
                DeleteFunc: func(e event.DeleteEvent) bool {
                    return true
                },
            })
        if err != nil {
            return err
        }
    }
    return nil
}

func (r *Reconciler) mapper(h handler.MapObject) []reconcile.Request {
    // check if the quota controller can evaluate this kind, if not, ignore it altogether...
    var result []reconcile.Request
    evaluators := r.registry.List()
    ctx := context.TODO()
    resourceQuotaNames, err := resourceQuotaNamesFor(ctx, r.Client, h.Meta.GetNamespace())
    if err != nil {
        klog.Errorf("failed to get resource quota names for: %v %T %v, err: %v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), err)
        return result
    }
    // only queue those quotas that are tracking a resource associated with this kind.
    for _, resourceQuotaName := range resourceQuotaNames {
        resourceQuota := &quotav1alpha2.ResourceQuota{}
        if err := r.Get(ctx, types.NamespacedName{Name: resourceQuotaName}, resourceQuota); err != nil {
            klog.Errorf("failed to get resource quota: %v, err: %v", resourceQuotaName, err)
            return result
        }
        resourceQuotaResources := quotav1.ResourceNames(resourceQuota.Status.Total.Hard)
        for _, evaluator := range evaluators {
            matchedResources := evaluator.MatchingResources(resourceQuotaResources)
            if len(matchedResources) > 0 {
                result = append(result, reconcile.Request{NamespacedName: types.NamespacedName{Name: resourceQuotaName}})
                break
            }
        }
    }
    klog.V(6).Infof("resource quota reconcile after resource change: %v %T %v, %+v", h.Meta.GetNamespace(), h.Object, h.Meta.GetName(), result)
    return result
}

func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    logger := r.logger.WithValues("resourcequota", req.NamespacedName)
    rootCtx := context.TODO()
    resourceQuota := &quotav1alpha2.ResourceQuota{}
    if err := r.Get(rootCtx, req.NamespacedName, resourceQuota); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    if err := r.bindWorkspace(resourceQuota); err != nil {
        logger.Error(err, "failed to set owner reference")
        return ctrl.Result{}, err
    }

    if err := r.syncQuotaForNamespaces(resourceQuota); err != nil {
        logger.Error(err, "failed to sync quota")
        return ctrl.Result{}, err
    }

    r.recorder.Event(resourceQuota, corev1.EventTypeNormal, "Synced", "Synced successfully")
    return ctrl.Result{RequeueAfter: r.resyncPeriod}, nil
}

func (r *Reconciler) bindWorkspace(resourceQuota *quotav1alpha2.ResourceQuota) error {
    workspaceName := resourceQuota.Labels[constants.WorkspaceLabelKey]
    if workspaceName == "" {
        return nil
    }

    workspace := &tenantv1alpha1.Workspace{}
    err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace)
    if err != nil {
        return client.IgnoreNotFound(err)
    }

    if !metav1.IsControlledBy(resourceQuota, workspace) {
        resourceQuota.OwnerReferences = nil
        if err := controllerutil.SetControllerReference(workspace, resourceQuota, r.scheme); err != nil {
            return err
        }
        err = r.Update(context.TODO(), resourceQuota)
        if err != nil {
            klog.Error(err)
            return err
        }
    }

    return nil
}

func (r *Reconciler) syncQuotaForNamespaces(originalQuota *quotav1alpha2.ResourceQuota) error {
    quota := originalQuota.DeepCopy()
    ctx := context.TODO()
    // get the list of namespaces that match this cluster quota
    matchingNamespaceList := corev1.NamespaceList{}
    if err := r.List(ctx, &matchingNamespaceList, &client.ListOptions{LabelSelector: labels.SelectorFromSet(quota.Spec.LabelSelector)}); err != nil {
        return err
    }

    matchingNamespaceNames := make([]string, 0)
    for _, namespace := range matchingNamespaceList.Items {
        matchingNamespaceNames = append(matchingNamespaceNames, namespace.Name)
    }

    for _, namespace := range matchingNamespaceList.Items {
        namespaceName := namespace.Name
        namespaceTotals, _ := getResourceQuotasStatusByNamespace(quota.Status.Namespaces, namespaceName)

        actualUsage, err := quotaUsageCalculationFunc(namespaceName, quota.Spec.Quota.Scopes, quota.Spec.Quota.Hard, r.registry, quota.Spec.Quota.ScopeSelector)
        if err != nil {
            return err
        }
        recalculatedStatus := corev1.ResourceQuotaStatus{
            Used: actualUsage,
            Hard: quota.Spec.Quota.Hard,
        }

        // subtract old usage, add new usage
        quota.Status.Total.Used = quotav1.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
        quota.Status.Total.Used = quotav1.Add(quota.Status.Total.Used, recalculatedStatus.Used)
        insertResourceQuotasStatus(&quota.Status.Namespaces, quotav1alpha2.ResourceQuotaStatusByNamespace{
            Namespace:           namespaceName,
            ResourceQuotaStatus: recalculatedStatus,
        })
    }

    // Remove any namespaces from quota.status that no longer match.
    statusCopy := quota.Status.Namespaces.DeepCopy()
    for _, namespaceTotals := range statusCopy {
        namespaceName := namespaceTotals.Namespace
        if !sliceutil.HasString(matchingNamespaceNames, namespaceName) {
            quota.Status.Total.Used = quotav1.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
            removeResourceQuotasStatusByNamespace(&quota.Status.Namespaces, namespaceName)
        }
    }

    quota.Status.Total.Hard = quota.Spec.Quota.Hard

    // if there's no change, no update, return early
    if equality.Semantic.DeepEqual(quota, originalQuota) {
        return nil
    }

    klog.V(6).Infof("update resource quota: %+v", quota)
    if err := r.Status().Update(ctx, quota, &client.UpdateOptions{}); err != nil {
        return err
    }

    return nil
}
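
// Editorial note (not part of the original change): the sync above preserves
// the invariant that Status.Total.Used equals the sum of the per-namespace
// Used entries in Status.Namespaces: each recalculation subtracts a
// namespace's previous contribution before adding the fresh one, and
// namespaces that stop matching the label selector are subtracted out as well.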

// quotaUsageCalculationFunc is a function to calculate quota usage. It is only configurable for easy unit testing
// NEVER CHANGE THIS OUTSIDE A TEST
var quotaUsageCalculationFunc = quotav1.CalculateUsage
191
pkg/controller/quota/resourcequota_webhook.go
Normal file
@@ -0,0 +1,191 @@
/*
Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"context"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilwait "k8s.io/apimachinery/pkg/util/wait"
	admissionapi "k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/klog"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/generic"
	"kubesphere.io/kubesphere/kube/pkg/quota/v1/install"
	"kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota"
	resourcequotaapi "kubesphere.io/kubesphere/kube/plugin/pkg/admission/resourcequota/apis/resourcequota"
	"net/http"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
	"sort"
	"sync"
)

const (
	numEvaluatorThreads = 10
)

type ResourceQuotaAdmission struct {
	client client.Client

	decoder *webhook.AdmissionDecoder

	lockFactory LockFactory

	// these are used to create the evaluator
	registry quota.Registry

	init      sync.Once
	evaluator resourcequota.Evaluator
}

func NewResourceQuotaAdmission(client client.Client, scheme *runtime.Scheme) (webhook.AdmissionHandler, error) {
	decoder, err := admission.NewDecoder(scheme)
	if err != nil {
		return nil, err
	}
	return &ResourceQuotaAdmission{
		client:      client,
		lockFactory: NewDefaultLockFactory(),
		decoder:     decoder,
		registry:    generic.NewRegistry(install.NewQuotaConfigurationForAdmission().Evaluators()),
	}, nil
}

func (r *ResourceQuotaAdmission) Handle(ctx context.Context, req webhook.AdmissionRequest) webhook.AdmissionResponse {
	// ignore all operations that correspond to sub-resource actions
	if len(req.RequestSubResource) != 0 {
		return webhook.Allowed("")
	}
	// ignore cluster level resources
	if len(req.Namespace) == 0 {
		return webhook.Allowed("")
	}

	r.init.Do(func() {
		resourceQuotaAccessor := newQuotaAccessor(r.client)
		r.evaluator = resourcequota.NewQuotaEvaluator(resourceQuotaAccessor, install.DefaultIgnoredResources(), r.registry, r.lockAquisition, &resourcequotaapi.Configuration{}, numEvaluatorThreads, utilwait.NeverStop)
	})

	attributesRecord, err := convertToAdmissionAttributes(req)
	if err != nil {
		klog.Error(err)
		return webhook.Errored(http.StatusBadRequest, err)
	}

	if err := r.evaluator.Evaluate(attributesRecord); err != nil {
		if errors.IsForbidden(err) {
			klog.Info(err)
			return webhook.Denied(err.Error())
		}
		klog.Error(err)
		return webhook.Errored(http.StatusInternalServerError, err)
	}

	return webhook.Allowed("")
}

type ByName []corev1.ResourceQuota

func (v ByName) Len() int           { return len(v) }
func (v ByName) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v ByName) Less(i, j int) bool { return v[i].Name < v[j].Name }

func (r *ResourceQuotaAdmission) lockAquisition(quotas []corev1.ResourceQuota) func() {
	var locks []sync.Locker

	// acquire the locks in alphabetical order because I'm too lazy to think of something clever
	sort.Sort(ByName(quotas))
	for _, quota := range quotas {
		lock := r.lockFactory.GetLock(string(quota.UID))
		lock.Lock()
		locks = append(locks, lock)
	}

	return func() {
		for i := len(locks) - 1; i >= 0; i-- {
			locks[i].Unlock()
		}
	}
}

func convertToAdmissionAttributes(req admission.Request) (admissionapi.Attributes, error) {
	var err error
	var object runtime.Object
	if len(req.Object.Raw) > 0 {
		object, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.Object.Raw, nil, nil)
		if err != nil {
			return nil, err
		}
	}

	var oldObject runtime.Object
	if len(req.OldObject.Raw) > 0 {
		oldObject, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.OldObject.Raw, nil, nil)
		if err != nil {
			klog.Error(err)
			return nil, err
		}
	}

	var operationOptions runtime.Object
	if len(req.Options.Raw) > 0 {
		operationOptions, _, err = scheme.Codecs.UniversalDeserializer().Decode(req.Options.Raw, nil, nil)
		if err != nil {
			klog.Error(err)
			return nil, err
		}
	}

	extras := map[string][]string{}
	for k, v := range req.UserInfo.Extra {
		extras[k] = v
	}

	attributesRecord := admissionapi.NewAttributesRecord(object,
		oldObject,
		schema.GroupVersionKind{
			Group:   req.RequestKind.Group,
			Version: req.RequestKind.Version,
			Kind:    req.RequestKind.Kind,
		},
		req.Namespace,
		req.Name,
		schema.GroupVersionResource{
			Group:    req.RequestResource.Group,
			Version:  req.RequestResource.Version,
			Resource: req.RequestResource.Resource,
		},
		req.SubResource,
		admissionapi.Operation(req.Operation),
		operationOptions,
		*req.DryRun,
		&user.DefaultInfo{
			Name:   req.UserInfo.Username,
			UID:    req.UserInfo.UID,
			Groups: req.UserInfo.Groups,
			Extra:  extras,
		})
	return attributesRecord, nil
}
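For orientation, a minimal sketch of how a handler like this is typically wired into a controller-runtime webhook server. The registration path and the `mgr` variable are assumptions for illustration, not part of this commit:

```go
// Sketch only: assumes a controller-runtime manager `mgr` has been
// constructed elsewhere; the webhook path is illustrative.
handler, err := quota.NewResourceQuotaAdmission(mgr.GetClient(), mgr.GetScheme())
if err != nil {
	klog.Fatalf("failed to create resource quota admission handler: %v", err)
}
mgr.GetWebhookServer().Register(
	"/validate-quota-kubesphere-io-v1alpha2",
	&webhook.Admission{Handler: handler},
)
```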
92
pkg/controller/quota/util.go
Normal file
@@ -0,0 +1,92 @@
/*
Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package quota

import (
	"context"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Following code copied from github.com/openshift/library-go/pkg/quota/quotautil
func getResourceQuotasStatusByNamespace(namespaceStatuses quotav1alpha2.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) {
	for i := range namespaceStatuses {
		curr := namespaceStatuses[i]
		if curr.Namespace == namespace {
			return curr.ResourceQuotaStatus, true
		}
	}
	return corev1.ResourceQuotaStatus{}, false
}

func removeResourceQuotasStatusByNamespace(namespaceStatuses *quotav1alpha2.ResourceQuotasStatusByNamespace, namespace string) {
	newNamespaceStatuses := quotav1alpha2.ResourceQuotasStatusByNamespace{}
	for i := range *namespaceStatuses {
		curr := (*namespaceStatuses)[i]
		if curr.Namespace == namespace {
			continue
		}
		newNamespaceStatuses = append(newNamespaceStatuses, curr)
	}
	*namespaceStatuses = newNamespaceStatuses
}

func insertResourceQuotasStatus(namespaceStatuses *quotav1alpha2.ResourceQuotasStatusByNamespace, newStatus quotav1alpha2.ResourceQuotaStatusByNamespace) {
	newNamespaceStatuses := quotav1alpha2.ResourceQuotasStatusByNamespace{}
	found := false
	for i := range *namespaceStatuses {
		curr := (*namespaceStatuses)[i]
		if curr.Namespace == newStatus.Namespace {
			// do this so that we don't change serialization order
			newNamespaceStatuses = append(newNamespaceStatuses, newStatus)
			found = true
			continue
		}
		newNamespaceStatuses = append(newNamespaceStatuses, curr)
	}
	if !found {
		newNamespaceStatuses = append(newNamespaceStatuses, newStatus)
	}
	*namespaceStatuses = newNamespaceStatuses
}

func resourceQuotaNamesFor(ctx context.Context, client client.Client, namespaceName string) ([]string, error) {
	namespace := &corev1.Namespace{}
	var resourceQuotaNames []string
	if err := client.Get(ctx, types.NamespacedName{Name: namespaceName}, namespace); err != nil {
		return resourceQuotaNames, err
	}
	if len(namespace.Labels) == 0 {
		return resourceQuotaNames, nil
	}
	resourceQuotaList := &quotav1alpha2.ResourceQuotaList{}
	if err := client.List(ctx, resourceQuotaList); err != nil {
		return resourceQuotaNames, err
	}
	for _, resourceQuota := range resourceQuotaList.Items {
		if len(resourceQuota.Spec.LabelSelector) > 0 &&
			labels.SelectorFromSet(resourceQuota.Spec.LabelSelector).Matches(labels.Set(namespace.Labels)) {
			resourceQuotaNames = append(resourceQuotaNames, resourceQuota.Name)
		}
	}
	return resourceQuotaNames, nil
}
34
pkg/controller/utils/hash.go
Normal file
@@ -0,0 +1,34 @@
package utils

import (
	"fmt"
	"github.com/davecgh/go-spew/spew"
	"hash"
	"hash/fnv"
	"k8s.io/apimachinery/pkg/util/rand"
)

// ComputeHash computes the hash value of an interface
func ComputeHash(obj interface{}) string {
	hasher := fnv.New32a()
	deepHashObject(hasher, obj)
	return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}

// deepHashObject writes the specified object to hash using the spew library,
// which follows pointers and prints actual values of the nested objects,
// ensuring the hash does not change when a pointer changes.
// **Notice**
// we don't want to import k8s.io/kubernetes as a module, but this is a very small function
// so just copy it from k8s.io/kubernetes@v1.14.0/pkg/util/hash/hash.go
// **Notice End**
func deepHashObject(hasher hash.Hash, objectToWrite interface{}) {
	hasher.Reset()
	printer := spew.ConfigState{
		Indent:         " ",
		SortKeys:       true,
		DisableMethods: true,
		SpewKeys:       true,
	}
	printer.Fprintf(hasher, "%#v", objectToWrite)
}
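As a quick illustration of the helper above: structurally equal values hash to the same string, which is what makes it usable for change detection in controllers. The spec type below is made up for the example, not part of this commit:

```go
package utils

import "testing"

// Illustrative only: exercises ComputeHash with a made-up spec type.
func TestComputeHashIsDeterministic(t *testing.T) {
	type fakeSpec struct {
		Replicas int
		Image    string
	}
	a := ComputeHash(fakeSpec{Replicas: 2, Image: "nginx:1.19"})
	b := ComputeHash(fakeSpec{Replicas: 2, Image: "nginx:1.19"})
	if a != b {
		t.Fatalf("equal values must hash equally: %s != %s", a, b)
	}
	c := ComputeHash(fakeSpec{Replicas: 3, Image: "nginx:1.19"})
	if a == c {
		t.Fatalf("different values should (almost always) hash differently")
	}
}
```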
@@ -20,6 +20,9 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/emicklei/go-restful"
	"k8s.io/apiserver/pkg/authentication/user"
	log "k8s.io/klog"
@@ -33,8 +36,6 @@ import (
	"kubesphere.io/kubesphere/pkg/server/params"
	clientDevOps "kubesphere.io/kubesphere/pkg/simple/client/devops"
	"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
	"net/http"
	"strings"
)

const jenkinsHeaderPre = "X-"
@@ -112,7 +113,8 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
	} else {
		pipelineMap[pipeline.Name] = i
		pipelineList.Items[i] = clientDevOps.Pipeline{
			Name: pipeline.Name,
			Name:        pipeline.Name,
			Annotations: pipeline.Annotations,
		}
	}
}
@@ -130,7 +132,10 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
	} else {
		for i, _ := range res.Items {
			if index, ok := pipelineMap[res.Items[i].Name]; ok {
				// keep annotations field of pipelineList
				annotations := pipelineList.Items[index].Annotations
				pipelineList.Items[index] = res.Items[i]
				pipelineList.Items[index].Annotations = annotations
			}
		}
	}
@@ -544,7 +544,6 @@ func (h *iamHandler) UpdateUser(request *restful.Request, response *restful.Resp
	var user iamv1alpha2.User

	err := request.ReadEntity(&user)

	if err != nil {
		api.HandleBadRequest(response, request, err)
		return
@@ -566,7 +565,6 @@ func (h *iamHandler) UpdateUser(request *restful.Request, response *restful.Resp
	}

	operator, ok := apirequest.UserFrom(request.Request.Context())

	if globalRole != "" && ok {
		err = h.updateGlobalRoleBinding(operator, updated, globalRole)
		if err != nil {
@@ -30,6 +30,7 @@ import (
	auditingv1alpha1 "kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1"
	eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1"
	loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
	"kubesphere.io/kubesphere/pkg/apiserver/query"
@@ -552,3 +553,78 @@ func (h *tenantHandler) ListClusters(r *restful.Request, response *restful.Respo

	response.WriteEntity(result)
}

func (h *tenantHandler) CreateWorkspaceResourceQuota(r *restful.Request, response *restful.Response) {
	workspaceName := r.PathParameter("workspace")
	resourceQuota := &quotav1alpha2.ResourceQuota{}
	err := r.ReadEntity(resourceQuota)
	if err != nil {
		api.HandleBadRequest(response, r, err)
		return
	}
	result, err := h.tenant.CreateWorkspaceResourceQuota(workspaceName, resourceQuota)
	if err != nil {
		api.HandleInternalError(response, r, err)
		return
	}
	response.WriteEntity(result)
}

func (h *tenantHandler) DeleteWorkspaceResourceQuota(r *restful.Request, response *restful.Response) {
	workspace := r.PathParameter("workspace")
	resourceQuota := r.PathParameter("resourcequota")

	if err := h.tenant.DeleteWorkspaceResourceQuota(workspace, resourceQuota); err != nil {
		if errors.IsNotFound(err) {
			api.HandleNotFound(response, r, err)
			return
		}
		api.HandleInternalError(response, r, err)
		return
	}

	response.WriteEntity(servererr.None)
}

func (h *tenantHandler) UpdateWorkspaceResourceQuota(r *restful.Request, response *restful.Response) {
	workspaceName := r.PathParameter("workspace")
	resourceQuotaName := r.PathParameter("resourcequota")
	resourceQuota := &quotav1alpha2.ResourceQuota{}
	err := r.ReadEntity(resourceQuota)
	if err != nil {
		api.HandleBadRequest(response, r, err)
		return
	}

	if resourceQuotaName != resourceQuota.Name {
		err := fmt.Errorf("the name of the object (%s) does not match the name on the URL (%s)", resourceQuota.Name, resourceQuotaName)
		klog.Errorf("%+v", err)
		api.HandleBadRequest(response, r, err)
		return
	}

	result, err := h.tenant.UpdateWorkspaceResourceQuota(workspaceName, resourceQuota)
	if err != nil {
		api.HandleInternalError(response, r, err)
		return
	}

	response.WriteEntity(result)
}

func (h *tenantHandler) DescribeWorkspaceResourceQuota(r *restful.Request, response *restful.Response) {
	workspaceName := r.PathParameter("workspace")
	resourceQuotaName := r.PathParameter("resourcequota")

	resourceQuota, err := h.tenant.DescribeWorkspaceResourceQuota(workspaceName, resourceQuotaName)
	if err != nil {
		if errors.IsNotFound(err) {
			api.HandleNotFound(response, r, err)
			return
		}
		api.HandleInternalError(response, r, err)
		return
	}

	response.WriteEntity(resourceQuota)
}
@@ -17,8 +17,9 @@ limitations under the License.
package v1alpha2

import (
	"kubesphere.io/kubesphere/pkg/models/metering"
	"net/http"

	"kubesphere.io/kubesphere/pkg/models/metering"
	"sigs.k8s.io/controller-runtime/pkg/cache"

	"github.com/emicklei/go-restful"
@@ -30,6 +31,7 @@ import (
	auditingv1alpha1 "kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1"
	eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1"
	loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	"kubesphere.io/kubesphere/pkg/apiserver/authorization/authorizer"
	"kubesphere.io/kubesphere/pkg/apiserver/runtime"
@@ -340,6 +342,37 @@ func AddToContainer(c *restful.Container, factory informers.InformerFactory, k8s
		Doc("Get resource price.").
		Writes(metering.PriceInfo{}).
		Returns(http.StatusOK, api.StatusOK, metering.PriceInfo{}))
	ws.Route(ws.POST("/workspaces/{workspace}/resourcequotas").
		To(handler.CreateWorkspaceResourceQuota).
		Reads(quotav1alpha2.ResourceQuota{}).
		Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}).
		Doc("Create resource quota.").
		Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag}))

	ws.Route(ws.DELETE("/workspaces/{workspace}/resourcequotas/{resourcequota}").
		To(handler.DeleteWorkspaceResourceQuota).
		Param(ws.PathParameter("workspace", "workspace name")).
		Param(ws.PathParameter("resourcequota", "resource quota name")).
		Returns(http.StatusOK, api.StatusOK, errors.None).
		Doc("Delete resource quota.").
		Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag}))

	ws.Route(ws.PUT("/workspaces/{workspace}/resourcequotas/{resourcequota}").
		To(handler.UpdateWorkspaceResourceQuota).
		Param(ws.PathParameter("workspace", "workspace name")).
		Param(ws.PathParameter("resourcequota", "resource quota name")).
		Reads(quotav1alpha2.ResourceQuota{}).
		Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}).
		Doc("Update resource quota.").
		Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag}))

	ws.Route(ws.GET("/workspaces/{workspace}/resourcequotas/{resourcequota}").
		To(handler.DescribeWorkspaceResourceQuota).
		Param(ws.PathParameter("workspace", "workspace name")).
		Param(ws.PathParameter("resourcequota", "resource quota name")).
		Returns(http.StatusOK, api.StatusOK, quotav1alpha2.ResourceQuota{}).
		Doc("Describe resource quota.").
		Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceTag}))

	c.Add(ws)
	return nil
@@ -64,12 +64,9 @@ func (im *imOperator) UpdateUser(new *iamv1alpha2.User) (*iamv1alpha2.User, erro
		klog.Error(err)
		return nil, err
	}
	if old.Annotations == nil {
		old.Annotations = make(map[string]string, 0)
	}
	// keep encrypted password
	new.Spec.EncryptedPassword = old.Spec.EncryptedPassword
	updated, err := im.ksClient.IamV1alpha2().Users().Update(context.Background(), old, metav1.UpdateOptions{})
	updated, err := im.ksClient.IamV1alpha2().Users().Update(context.Background(), new, metav1.UpdateOptions{})
	if err != nil {
		klog.Error(err)
		return nil, err
75
pkg/models/tenant/resourcequota.go
Normal file
@@ -0,0 +1,75 @@
/*
Copyright 2021 The KubeSphere Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tenant

import (
	"context"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
)

func (t *tenantOperator) CreateWorkspaceResourceQuota(workspace string, quota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) {
	if quota.Labels == nil {
		quota.Labels = make(map[string]string)
	}
	quota.Labels[tenantv1alpha1.WorkspaceLabel] = workspace
	quota.Spec.LabelSelector = labels.Set{tenantv1alpha1.WorkspaceLabel: workspace}
	return t.ksclient.QuotaV1alpha2().ResourceQuotas().Create(context.TODO(), quota, metav1.CreateOptions{})
}

func (t *tenantOperator) UpdateWorkspaceResourceQuota(workspace string, quota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error) {
	resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), quota.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace {
		return nil, errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuota.Name)
	}
	if quota.Labels == nil {
		quota.Labels = make(map[string]string)
	}
	quota.Labels[tenantv1alpha1.WorkspaceLabel] = workspace
	quota.Spec.LabelSelector = labels.Set{tenantv1alpha1.WorkspaceLabel: workspace}
	return t.ksclient.QuotaV1alpha2().ResourceQuotas().Update(context.TODO(), quota, metav1.UpdateOptions{})
}

func (t *tenantOperator) DeleteWorkspaceResourceQuota(workspace string, resourceQuotaName string) error {
	resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), resourceQuotaName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace {
		return errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuotaName)
	}
	return t.ksclient.QuotaV1alpha2().ResourceQuotas().Delete(context.TODO(), resourceQuotaName, metav1.DeleteOptions{})
}

func (t *tenantOperator) DescribeWorkspaceResourceQuota(workspace string, resourceQuotaName string) (*quotav1alpha2.ResourceQuota, error) {
	resourceQuota, err := t.ksclient.QuotaV1alpha2().ResourceQuotas().Get(context.TODO(), resourceQuotaName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if resourceQuota.Labels[tenantv1alpha1.WorkspaceLabel] != workspace {
		return nil, errors.NewNotFound(quotav1alpha2.Resource(quotav1alpha2.ResourcesSingularCluster), resourceQuotaName)
	}
	return resourceQuota, nil
}
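To make the label contract above concrete, here is a hedged sketch of the object these operators manage: the workspace label both marks ownership (checked on update and delete) and drives the namespace selector. Names and quantities are illustrative; field names follow the code above:

```go
// Sketch only: assumes the imports used in the file above plus
// k8s.io/apimachinery/pkg/api/resource.
quota := &quotav1alpha2.ResourceQuota{
	ObjectMeta: metav1.ObjectMeta{
		Name: "demo-workspace-quota",
		// set by CreateWorkspaceResourceQuota
		Labels: map[string]string{tenantv1alpha1.WorkspaceLabel: "demo-workspace"},
	},
	Spec: quotav1alpha2.ResourceQuotaSpec{
		// selects every namespace belonging to the workspace
		LabelSelector: labels.Set{tenantv1alpha1.WorkspaceLabel: "demo-workspace"},
		Quota: corev1.ResourceQuotaSpec{
			Hard: corev1.ResourceList{
				corev1.ResourceRequestsCPU: resource.MustParse("10"),
			},
		},
	},
}
```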
@@ -39,6 +39,7 @@ import (
	loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
	meteringv1alpha1 "kubesphere.io/kubesphere/pkg/api/metering/v1alpha1"
	clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
	quotav1alpha2 "kubesphere.io/kubesphere/pkg/apis/quota/v1alpha2"
	tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
	tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
	typesv1beta1 "kubesphere.io/kubesphere/pkg/apis/types/v1beta1"
@@ -87,6 +88,10 @@ type Interface interface {
	ListClusters(info user.Info) (*api.ListResult, error)
	Metering(user user.Info, queryParam *meteringv1alpha1.Query) (monitoring.Metrics, error)
	MeteringHierarchy(user user.Info, queryParam *meteringv1alpha1.Query) (metering.ResourceStatistic, error)
	CreateWorkspaceResourceQuota(workspace string, resourceQuota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error)
	DeleteWorkspaceResourceQuota(workspace string, resourceQuotaName string) error
	UpdateWorkspaceResourceQuota(workspace string, resourceQuota *quotav1alpha2.ResourceQuota) (*quotav1alpha2.ResourceQuota, error)
	DescribeWorkspaceResourceQuota(workspace string, resourceQuotaName string) (*quotav1alpha2.ResourceQuota, error)
}

type tenantOperator struct {
@@ -34,12 +34,13 @@ func (j *Jenkins) SendPureRequest(path string, httpParameters *devops.HttpParame
// provide request header to call jenkins api.
// transfer bearer token to basic token for inner OAuth and Jenkins
func (j *Jenkins) SendPureRequestWithHeaderResp(path string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
	Url, err := url.Parse(j.Server + path)
	apiURL, err := url.Parse(j.Server + path)
	if err != nil {
		klog.Error(err)
		klog.V(8).Info(err)
		return nil, nil, err
	}

	apiURL.RawQuery = httpParameters.Url.RawQuery
	client := &http.Client{Timeout: 30 * time.Second}

	header := httpParameters.Header
@@ -47,7 +48,7 @@ func (j *Jenkins) SendPureRequestWithHeaderResp(path string, httpParameters *dev

	newRequest := &http.Request{
		Method: httpParameters.Method,
		URL:    Url,
		URL:    apiURL,
		Header: header,
		Body:   httpParameters.Body,
		Form:   httpParameters.Form,
@@ -74,10 +74,9 @@ func getRespBody(resp *http.Response) ([]byte, error) {

// ParseJenkinsQuery parses the special query of jenkins.
// Unlike ParseQuery in the standard library, it does not re-encode the query.
func ParseJenkinsQuery(query string) (url.Values, error) {
	m := make(url.Values)
	err := error(nil)
	for query != "" {
func ParseJenkinsQuery(query string) (result url.Values, err error) {
	result = make(url.Values)
	for query != "" && err == nil {
		key := query
		if i := strings.IndexAny(key, "&"); i >= 0 {
			key, query = key[:i], key[i+1:]
@@ -91,23 +90,13 @@ func ParseJenkinsQuery(query string) (url.Values, error) {
		if i := strings.Index(key, "="); i >= 0 {
			key, value = key[:i], key[i+1:]
		}
		key, err1 := url.QueryUnescape(key)
		if err1 != nil {
			if err == nil {
				err = err1
		if key, err = url.QueryUnescape(key); err == nil {
			if value, err = url.QueryUnescape(value); err == nil {
				result[key] = append(result[key], value)
			}
			continue
		}
		value, err1 = url.QueryUnescape(value)
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}
		m[key] = append(m[key], value)
	}
	return m, err
	return
}

type JenkinsBlueTime time.Time
57
pkg/simple/client/devops/jenkins/utils_test.go
Normal file
@@ -0,0 +1,57 @@
package jenkins

import (
	"github.com/stretchr/testify/assert"
	"net/url"
	"testing"
)

func TestParseJenkinsQuery(t *testing.T) {
	table := []testData{
		{
			param: "start=0&limit=10&branch=master",
			expected: url.Values{
				"start":  []string{"0"},
				"limit":  []string{"10"},
				"branch": []string{"master"},
			}, err: false,
		},
		{
			param: "branch=master", expected: url.Values{
				"branch": []string{"master"},
			}, err: false,
		},
		{
			param: "&branch=master", expected: url.Values{
				"branch": []string{"master"},
			}, err: false,
		},
		{
			param: "branch=master&", expected: url.Values{
				"branch": []string{"master"},
			}, err: false,
		},
		{
			param: "branch=%gg", expected: url.Values{}, err: true,
		},
		{
			param: "%gg=fake", expected: url.Values{}, err: true,
		},
	}

	for index, item := range table {
		result, err := ParseJenkinsQuery(item.param)
		if item.err {
			assert.NotNil(t, err, "index: [%d], unexpected error happen %v", index, err)
		} else {
			assert.Nil(t, err, "index: [%d], unexpected error happen %v", index, err)
		}
		assert.Equal(t, item.expected, result, "index: [%d], result do not match with the expect value", index)
	}
}

type testData struct {
	param    string
	expected interface{}
	err      bool
}
@@ -31,8 +31,9 @@ type PipelineList struct {

// GetPipeline & SearchPipelines
type Pipeline struct {
	Class string `json:"_class,omitempty" description:"It’s a fully qualified name and is an identifier of the producer of this resource's capability."`
	Links struct {
	Annotations map[string]string `json:"annotations,omitempty" description:"Add annotations from crd"`
	Class       string            `json:"_class,omitempty" description:"It’s a fully qualified name and is an identifier of the producer of this resource's capability."`
	Links       struct {
		Self struct {
			Class string `json:"_class,omitempty"`
			Href  string `json:"href,omitempty"`
@@ -503,9 +504,9 @@ type PipelineBranchItem struct {
	Parameters []struct {
		Class                 string `json:"_class,omitempty" description:"It’s a fully qualified name and is an identifier of the producer of this resource's capability."`
		DefaultParameterValue struct {
			Class string `json:"_class,omitempty" description:"It’s a fully qualified name and is an identifier of the producer of this resource's capability."`
			Name  string `json:"name,omitempty" description:"name"`
			Value string `json:"value,omitempty" description:"value"`
			Class string      `json:"_class,omitempty" description:"It’s a fully qualified name and is an identifier of the producer of this resource's capability."`
			Name  string      `json:"name,omitempty" description:"name"`
			Value interface{} `json:"value,omitempty" description:"value"`
		} `json:"defaultParameterValue,omitempty"`
		Description string `json:"description,omitempty" description:"description"`
		Name        string `json:"name,omitempty" description:"name"`
@@ -535,8 +536,8 @@ type PipelineBranchItem struct {
// RunPipeline
type RunPayload struct {
	Parameters []struct {
		Name  string `json:"name,omitempty" description:"name"`
		Value string `json:"value,omitempty" description:"value"`
		Name  string      `json:"name,omitempty" description:"name"`
		Value interface{} `json:"value,omitempty" description:"value"`
	} `json:"parameters,omitempty"`
}

@@ -1035,8 +1036,8 @@ type ResJson struct {
	Arguments []struct {
		Key   string `json:"key,omitempty" description:"key"`
		Value struct {
			IsLiteral bool   `json:"isLiteral,omitempty" description:"is literal or not"`
			Value     string `json:"value,omitempty" description:"value"`
			IsLiteral bool        `json:"isLiteral,omitempty" description:"is literal or not"`
			Value     interface{} `json:"value,omitempty" description:"value"`
		} `json:"value,omitempty"`
	} `json:"arguments,omitempty"`
} `json:"parameters,omitempty"`
@@ -22,7 +22,7 @@ import (
	"github.com/spf13/pflag"
)

const DefaultResyncPeriod = time.Duration(120) * time.Second
const DefaultResyncPeriod = 120 * time.Second

type Options struct {
	// Enable
@@ -79,5 +79,5 @@ func (o *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
		"This field is used when generating deployment yaml for agent.")

	fs.DurationVar(&o.ClusterControllerResyncSecond, "cluster-controller-resync-second", s.ClusterControllerResyncSecond,
		"Cluster controller resync second to sync cluster resource.")
		"Cluster controller resync period to sync cluster resource, e.g. 2m, 5m, 10m. Defaults to 2m.")
}
@@ -21,6 +21,7 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	k8sinformers "k8s.io/client-go/informers"
	"net"
	"time"

@@ -38,7 +39,6 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	"kubesphere.io/kubesphere/pkg/apis/network/calicov3"
@@ -94,7 +94,7 @@ type provider struct {
	options Options
}

func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
func (p *provider) CreateIPPool(pool *v1alpha1.IPPool) error {
	calicoPool := &calicov3.IPPool{
		TypeMeta: v1.TypeMeta{},
		ObjectMeta: v1.ObjectMeta{
@@ -104,9 +104,9 @@ func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
			CIDR:         pool.Spec.CIDR,
			Disabled:     pool.Spec.Disabled,
			NodeSelector: "all()",
			VXLANMode:    v3.VXLANMode(c.options.VXLANMode),
			IPIPMode:     v3.IPIPMode(c.options.IPIPMode),
			NATOutgoing:  c.options.NATOutgoing,
			VXLANMode:    v3.VXLANMode(p.options.VXLANMode),
			IPIPMode:     v3.IPIPMode(p.options.IPIPMode),
			NATOutgoing:  p.options.NATOutgoing,
		},
	}

@@ -121,7 +121,7 @@ func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
		klog.Warningf("cannot set reference for calico ippool %s, err=%v", pool.Name, err)
	}

	_, err = c.client.CrdCalicov3().IPPools().Create(context.TODO(), calicoPool, v1.CreateOptions{})
	_, err = p.client.CrdCalicov3().IPPools().Create(context.TODO(), calicoPool, v1.CreateOptions{})
	if k8serrors.IsAlreadyExists(err) {
		return nil
	}
@@ -129,19 +129,22 @@ func (c provider) CreateIPPool(pool *v1alpha1.IPPool) error {
	return err
}

func (c provider) UpdateIPPool(pool *v1alpha1.IPPool) error {
func (p *provider) UpdateIPPool(pool *v1alpha1.IPPool) error {
	return nil
}

func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error) {
func (p *provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error) {
	stats := pool.DeepCopy()

	calicoPool, err := c.client.CrdCalicov3().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
	calicoPool, err := p.client.CrdCalicov3().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
	if err != nil {
		return nil, err
	}

	blocks, err := c.listBlocks(calicoPool)
	blocks, err := p.block.Lister().List(labels.SelectorFromSet(
		labels.Set{
			v1alpha1.IPPoolNameLabel: calicoPool.Name,
		}))
	if err != nil {
		return nil, err
	}
@@ -152,9 +155,7 @@ func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error
	stats.Status.Synced = true
	stats.Status.Allocations = 0
	stats.Status.Reserved = 0
	if stats.Status.Workspaces == nil {
		stats.Status.Workspaces = make(map[string]v1alpha1.WorkspaceStatus)
	}
	stats.Status.Workspaces = make(map[string]v1alpha1.WorkspaceStatus)

	if len(blocks) <= 0 {
		stats.Status.Unallocated = pool.NumAddresses()
@@ -168,23 +169,20 @@ func (c provider) GetIPPoolStats(pool *v1alpha1.IPPool) (*v1alpha1.IPPool, error
		stats.Status.Unallocated = stats.Status.Capacity - stats.Status.Allocations - stats.Status.Reserved
	}

	wks, err := c.getAssociatedWorkspaces(pool)
	wks, err := p.getAssociatedWorkspaces(pool)
	if err != nil {
		return nil, err
	}

	for _, wk := range wks {
		status, err := c.getWorkspaceStatus(wk, pool.GetName())
		status, err := p.getWorkspaceStatus(wk, pool.GetName())
		if err != nil {
			return nil, err
		}
		stats.Status.Workspaces[wk] = *status
	}

	for name, wk := range stats.Status.Workspaces {
		if wk.Allocations == 0 {
			delete(stats.Status.Workspaces, name)
		if status.Allocations == 0 {
			continue
		}
		stats.Status.Workspaces[wk] = *status
	}

	return stats, nil
@@ -195,16 +193,18 @@ func setBlockAffiDeletion(c calicoset.Interface, blockAffi *calicov3.BlockAffini
		return nil
	}

	blockAffi.Spec.State = string(model.StatePendingDeletion)
	_, err := c.CrdCalicov3().BlockAffinities().Update(context.TODO(), blockAffi, v1.UpdateOptions{})
	clone := blockAffi.DeepCopy()
	clone.Spec.State = string(model.StatePendingDeletion)
	_, err := c.CrdCalicov3().BlockAffinities().Update(context.TODO(), clone, v1.UpdateOptions{})
	return err
}

func deleteBlockAffi(c calicoset.Interface, blockAffi *calicov3.BlockAffinity) error {
	trueStr := fmt.Sprintf("%t", true)
	if blockAffi.Spec.Deleted != trueStr {
		blockAffi.Spec.Deleted = trueStr
		_, err := c.CrdCalicov3().BlockAffinities().Update(context.TODO(), blockAffi, v1.UpdateOptions{})
		clone := blockAffi.DeepCopy()
		clone.Spec.Deleted = trueStr
		_, err := c.CrdCalicov3().BlockAffinities().Update(context.TODO(), clone, v1.UpdateOptions{})
		if err != nil {
			return err
		}
@@ -218,10 +218,10 @@ func deleteBlockAffi(c calicoset.Interface, blockAffi *calicov3.BlockAffinity) e
	return nil
}

func (c provider) doBlockAffis(pool *calicov3.IPPool, do func(calicoset.Interface, *calicov3.BlockAffinity) error) error {
func (p *provider) doBlockAffis(pool *calicov3.IPPool, do func(calicoset.Interface, *calicov3.BlockAffinity) error) error {
	_, cidrNet, _ := cnet.ParseCIDR(pool.Spec.CIDR)

	blockAffis, err := c.client.CrdCalicov3().BlockAffinities().List(context.TODO(), v1.ListOptions{})
	blockAffis, err := p.client.CrdCalicov3().BlockAffinities().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		return err
	}
@@ -232,7 +232,7 @@ func (c provider) doBlockAffis(pool *calicov3.IPPool, do func(calicoset.Interfac
		continue
	}

	err = do(c.client, &blockAffi)
	err = do(p.client, &blockAffi)
	if err != nil {
		return err
	}
@@ -241,34 +241,17 @@ func (c provider) doBlockAffis(pool *calicov3.IPPool, do func(calicoset.Interfac
	return nil
}

func (c provider) listBlocks(pool *calicov3.IPPool) ([]calicov3.IPAMBlock, error) {
	_, cidrNet, _ := cnet.ParseCIDR(pool.Spec.CIDR)

	blocks, err := c.client.CrdCalicov3().IPAMBlocks().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		return nil, err
	}

	var result []calicov3.IPAMBlock
	for _, block := range blocks.Items {
		_, blockCIDR, _ := cnet.ParseCIDR(block.Spec.CIDR)
		if !cidrNet.IsNetOverlap(blockCIDR.IPNet) {
			continue
		}
		result = append(result, block)
	}

	return result, nil
}

func (c provider) doBlocks(pool *calicov3.IPPool, do func(calicoset.Interface, *calicov3.IPAMBlock) error) error {
	blocks, err := c.listBlocks(pool)
func (p *provider) doBlocks(pool *calicov3.IPPool, do func(calicoset.Interface, *calicov3.IPAMBlock) error) error {
	blocks, err := p.block.Lister().List(labels.SelectorFromSet(
		labels.Set{
			v1alpha1.IPPoolNameLabel: pool.Name,
		}))
	if err != nil {
		return err
	}

	for _, block := range blocks {
		err = do(c.client, &block)
		err = do(p.client, block)
		if err != nil {
			return err
		}
@@ -280,8 +263,9 @@ func (c provider) doBlocks(pool *calicov3.IPPool, do func(calicoset.Interface, *
func deleteBlock(c calicoset.Interface, block *calicov3.IPAMBlock) error {
	if block.Empty() {
		if !block.Spec.Deleted {
			block.Spec.Deleted = true
			_, err := c.CrdCalicov3().IPAMBlocks().Update(context.TODO(), block, v1.UpdateOptions{})
			clone := block.DeepCopy()
			clone.Spec.Deleted = true
			_, err := c.CrdCalicov3().IPAMBlocks().Update(context.TODO(), clone, v1.UpdateOptions{})
			if err != nil {
				return err
			}
@@ -297,7 +281,7 @@ func deleteBlock(c calicoset.Interface, block *calicov3.IPAMBlock) error {
	return nil
}

func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
func (p *provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
	// Deleting a pool requires a little care because of existing endpoints
	// using IP addresses allocated in the pool. We do the deletion in
	// the following steps:
@@ -306,7 +290,7 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
	// - delete the pool

	// Get the pool so that we can find the CIDR associated with it.
	calicoPool, err := c.client.CrdCalicov3().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
	calicoPool, err := p.client.CrdCalicov3().IPPools().Get(context.TODO(), pool.Name, v1.GetOptions{})
	if err != nil {
		if k8serrors.IsNotFound(err) {
			return true, nil
@@ -318,14 +302,14 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
	if !calicoPool.Spec.Disabled {
		calicoPool.Spec.Disabled = true

		calicoPool, err = c.client.CrdCalicov3().IPPools().Update(context.TODO(), calicoPool, v1.UpdateOptions{})
		calicoPool, err = p.client.CrdCalicov3().IPPools().Update(context.TODO(), calicoPool, v1.UpdateOptions{})
		if err != nil {
			return false, err
		}
	}

	// If the address pool is being used, we return, avoiding deletions that cause other problems.
	stat, err := c.GetIPPoolStats(pool)
	stat, err := p.GetIPPoolStats(pool)
	if err != nil {
		return false, err
	}
@@ -334,13 +318,13 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
	}

	// set blockaffi to pendingdelete
	err = c.doBlockAffis(calicoPool, setBlockAffiDeletion)
	err = p.doBlockAffis(calicoPool, setBlockAffiDeletion)
	if err != nil {
		return false, err
	}

	// delete block
	err = c.doBlocks(calicoPool, deleteBlock)
	err = p.doBlocks(calicoPool, deleteBlock)
	if err != nil {
		if errors.Is(err, ErrBlockInuse) {
			return false, nil
@@ -349,13 +333,13 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
	}

	// delete blockaffi
	err = c.doBlockAffis(calicoPool, deleteBlockAffi)
	err = p.doBlockAffis(calicoPool, deleteBlockAffi)
	if err != nil {
		return false, err
	}

	// delete calico ippool
	err = c.client.CrdCalicov3().IPPools().Delete(context.TODO(), calicoPool.Name, v1.DeleteOptions{})
	err = p.client.CrdCalicov3().IPPools().Delete(context.TODO(), calicoPool.Name, v1.DeleteOptions{})
	if err != nil {
		return false, err
	}
@@ -365,14 +349,14 @@ func (c provider) DeleteIPPool(pool *v1alpha1.IPPool) (bool, error) {
}

// Synchronize address pools at boot time
func (c provider) syncIPPools() error {
	calicoPools, err := c.client.CrdCalicov3().IPPools().List(context.TODO(), v1.ListOptions{})
func (p *provider) syncIPPools() error {
	calicoPools, err := p.client.CrdCalicov3().IPPools().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		klog.V(4).Infof("syncIPPools: cannot list calico ippools, err=%v", err)
		return err
	}

	pools, err := c.ksclient.NetworkV1alpha1().IPPools().List(context.TODO(), v1.ListOptions{})
	pools, err := p.ksclient.NetworkV1alpha1().IPPools().List(context.TODO(), v1.ListOptions{})
	if err != nil {
		klog.V(4).Infof("syncIPPools: cannot list kubesphere ippools, err=%v", err)
		return err
@@ -402,7 +386,7 @@ func (c provider) syncIPPools() error {
		Status: v1alpha1.IPPoolStatus{},
	}

	_, err = c.ksclient.NetworkV1alpha1().IPPools().Create(context.TODO(), pool, v1.CreateOptions{})
	_, err = p.ksclient.NetworkV1alpha1().IPPools().Create(context.TODO(), pool, v1.CreateOptions{})
	if err != nil {
		klog.V(4).Infof("syncIPPools: cannot create kubesphere ippools, err=%v", err)
		return err
@@ -413,7 +397,7 @@ func (c provider) syncIPPools() error {
	return nil
}

func (p provider) getAssociatedWorkspaces(pool *v1alpha1.IPPool) ([]string, error) {
func (p *provider) getAssociatedWorkspaces(pool *v1alpha1.IPPool) ([]string, error) {
	var result []string

	poolLabel := constants.WorkspaceLabelKey
@@ -430,10 +414,19 @@ func (p provider) getAssociatedWorkspaces(pool *v1alpha1.IPPool) ([]string, erro
		return result, nil
	}

	return append(result, pool.GetLabels()[poolLabel]), nil
	wk := pool.GetLabels()[poolLabel]
	_, err := p.ksclient.TenantV1alpha1().Workspaces().Get(context.TODO(), wk, v1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		clone := pool.DeepCopy()
		delete(clone.GetLabels(), poolLabel)
		_, err := p.ksclient.NetworkV1alpha1().IPPools().Update(context.TODO(), clone, v1.UpdateOptions{})
		return nil, err
	}

	return append(result, wk), err
}

func (p provider) getWorkspaceStatus(name string, poolName string) (*v1alpha1.WorkspaceStatus, error) {
func (p *provider) getWorkspaceStatus(name string, poolName string) (*v1alpha1.WorkspaceStatus, error) {
	var result v1alpha1.WorkspaceStatus

	namespaces, err := p.k8sclient.CoreV1().Namespaces().List(context.TODO(), v1.ListOptions{
@@ -448,12 +441,19 @@ func (p provider) getWorkspaceStatus(name string, poolName string) (*v1alpha1.Wo
	}

	for _, ns := range namespaces.Items {
		pods, err := p.k8sclient.CoreV1().Pods(ns.GetName()).List(context.TODO(), v1.ListOptions{})
		pods, err := p.k8sclient.CoreV1().Pods(ns.GetName()).List(context.TODO(), v1.ListOptions{
			LabelSelector: labels.SelectorFromSet(
				labels.Set{
					v1alpha1.IPPoolNameLabel: poolName,
				},
			).String(),
		})
		if err != nil {
			return nil, err
		}

		for _, pod := range pods.Items {
			if pod.GetLabels() != nil && pod.GetLabels()[v1alpha1.IPPoolNameLabel] == poolName {
			if pod.Status.Phase != corev1.PodSucceeded {
				result.Allocations++
			}
		}
@@ -462,11 +462,25 @@ func (p provider) getWorkspaceStatus(name string, poolName string) (*v1alpha1.Wo
	return &result, nil
}

func (p provider) Type() string {
func (p *provider) Type() string {
	return v1alpha1.IPPoolTypeCalico
}

func (p provider) SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error {
func (p *provider) UpdateNamespace(ns *corev1.Namespace, pools []string) error {
	if pools != nil {
		annostrs, _ := json.Marshal(pools)
		if ns.Annotations == nil {
			ns.Annotations = make(map[string]string)
		}
		ns.Annotations[CalicoAnnotationIPPoolV4] = string(annostrs)
	} else {
		delete(ns.Annotations, CalicoAnnotationIPPoolV4)
	}

	return nil
}
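For reference, the annotation written by `UpdateNamespace` is just the JSON-encoded pool list. A tiny sketch of the round trip — the pool name is invented, and `CalicoAnnotationIPPoolV4` is defined elsewhere in this package (presumably Calico's `cni.projectcalico.org/ipv4pools` key):

```go
// Sketch: what UpdateNamespace stores on the namespace.
annostrs, _ := json.Marshal([]string{"pool-a"})
fmt.Println(string(annostrs)) // ["pool-a"]
// ns.Annotations[CalicoAnnotationIPPoolV4] then holds `["pool-a"]`,
// which Calico IPAM reads to restrict the namespace to that pool.
```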
func (p *provider) SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error {
	defer utilruntime.HandleCrash()
	defer p.queue.ShutDown()

@@ -488,7 +502,7 @@ func (p provider) SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInt
	return nil
}

func (p provider) processBlock(name string) error {
func (p *provider) processBlock(name string) error {
	block, err := p.block.Lister().Get(name)
	if err != nil {
		if k8serrors.IsNotFound(err) {
@@ -510,10 +524,11 @@ func (p provider) processBlock(name string) error {
	if poolCIDR.IsNetOverlap(blockCIDR.IPNet) {
		poolName = pool.Name

		block.Labels = map[string]string{
		clone := block.DeepCopy()
		clone.Labels = map[string]string{
			v1alpha1.IPPoolNameLabel: pool.Name,
		}
		p.client.CrdCalicov3().IPAMBlocks().Update(context.TODO(), block, v1.UpdateOptions{})
		p.client.CrdCalicov3().IPAMBlocks().Update(context.TODO(), clone, v1.UpdateOptions{})
		break
	}
}
@@ -529,52 +544,35 @@ func (p provider) processBlock(name string) error {

	pod, err := p.pods.Lister().Pods(namespace).Get(name)
	if err != nil {
		continue
		if k8serrors.IsNotFound(err) {
			continue
		}
		return err
	}

	labels := pod.GetLabels()
	clone := pod.DeepCopy()
	labels := clone.GetLabels()
	if labels != nil {
		poolLabel := labels[v1alpha1.IPPoolNameLabel]
		if poolLabel != "" {
			continue
		}
	} else {
		clone.Labels = make(map[string]string)
	}

	retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		pod, err = p.k8sclient.CoreV1().Pods(namespace).Get(context.TODO(), name, v1.GetOptions{})
		if err != nil {
			return err
		}

		labels := pod.GetLabels()
		if labels != nil {
			poolLabel := labels[v1alpha1.IPPoolNameLabel]
			if poolLabel != "" {
				return nil
			}
		} else {
			pod.Labels = make(map[string]string)
		}

		if pod.GetAnnotations() == nil {
			pod.Annotations = make(map[string]string)
		}

		annostrs, _ := json.Marshal([]string{poolName})
		pod.GetAnnotations()[CalicoAnnotationIPPoolV4] = string(annostrs)
		pod.Labels[v1alpha1.IPPoolNameLabel] = poolName

		_, err = p.k8sclient.CoreV1().Pods(namespace).Update(context.TODO(), pod, v1.UpdateOptions{})
	clone.Labels[v1alpha1.IPPoolNameLabel] = poolName

	_, err = p.k8sclient.CoreV1().Pods(namespace).Update(context.TODO(), clone, v1.UpdateOptions{})
	if err != nil {
		return err
	})
}
}

	p.poolQueue.Add(poolName)
	return nil
}

func (p provider) processBlockItem() bool {
func (p *provider) processBlockItem() bool {
	key, quit := p.queue.Get()
	if quit {
		return false
@@ -592,12 +590,12 @@ func (p provider) processBlockItem() bool {
	return true
}

func (p provider) runWorker() {
func (p *provider) runWorker() {
	for p.processBlockItem() {
	}
}

func (p provider) addBlock(obj interface{}) {
func (p *provider) addBlock(obj interface{}) {
	block, ok := obj.(*calicov3.IPAMBlock)
	if !ok {
		return
@@ -606,7 +604,7 @@ func (p provider) addBlock(obj interface{}) {
	p.queue.Add(block.Name)
}

func (p provider) Default(obj runtime.Object) error {
func (p *provider) Default(obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return nil
@@ -639,7 +637,18 @@ func (p provider) Default(obj runtime.Object) error {
	return nil
}

func NewProvider(podInformer informercorev1.PodInformer, ksclient kubesphereclient.Interface, k8sClient clientset.Interface, k8sOptions *k8s.KubernetesOptions) provider {
func (p *provider) addPod(obj interface{}) {
	pod, _ := obj.(*corev1.Pod)

	if pod.Labels != nil {
		pool := pod.Labels[v1alpha1.IPPoolNameLabel]
		if pool != "" && p.poolQueue != nil {
			p.poolQueue.Add(pool)
		}
	}
}

func NewProvider(k8sInformer k8sinformers.SharedInformerFactory, ksclient kubesphereclient.Interface, k8sClient clientset.Interface, k8sOptions *k8s.KubernetesOptions) *provider {
	config, err := clientcmd.BuildConfigFromFlags("", k8sOptions.KubeConfig)
	if err != nil {
		klog.Fatalf("failed to build k8s config, err=%v", err)
@@ -677,14 +686,27 @@ func NewProvider(podInformer informercorev1.PodInformer, ksclient kubesphereclie
	}
}

	p := provider{
	p := &provider{
		client:    client,
		ksclient:  ksclient,
		k8sclient: k8sClient,
		pods:      podInformer,
		queue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "calicoBlock"),
		options:   opts,
	}
	p.pods = k8sInformer.Core().V1().Pods()

	p.pods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(old, new interface{}) {
			poolOld := old.(*corev1.Pod).Labels[v1alpha1.IPPoolNameLabel]
			poolNew := new.(*corev1.Pod).Labels[v1alpha1.IPPoolNameLabel]
			if poolNew == poolOld {
				return
			}
			p.addPod(new)
		},
		DeleteFunc: p.addPod,
		AddFunc:    p.addPod,
	})

	blockI := calicoInformer.NewSharedInformerFactory(client, defaultResync).Crd().Calicov3().IPAMBlocks()
	blockI.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -695,9 +717,16 @@ func NewProvider(podInformer informercorev1.PodInformer, ksclient kubesphereclie
	})
	p.block = blockI

	if err := p.syncIPPools(); err != nil {
		klog.Fatalf("failed to sync calico ippool to kubesphere ippool, err=%v", err)
	}
	go func() {
		for {
			if err := p.syncIPPools(); err != nil {
				klog.Infof("failed to sync calico ippool to kubesphere ippool, err=%v", err)
				time.Sleep(3 * time.Second)
				continue
			}
			break
		}
	}()

	return p
}
@@ -423,6 +423,10 @@ func (c IPAMClient) GetUtilization(args GetUtilizationArgs) ([]*PoolUtilization,
		return nil, err
	}

	if len(allPools) <= 0 {
		return nil, fmt.Errorf("no pool found")
	}

	// Identify the ones we want and create a PoolUtilization for each of those.
	wantAllPools := len(args.Pools) == 0
	wantedPools := set.FromArray(args.Pools)
@@ -17,8 +17,9 @@ limitations under the License.
package ippool

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	v1 "k8s.io/client-go/informers/core/v1"
	k8sinformers "k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/workqueue"
	networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1"
@@ -35,6 +36,7 @@ type Provider interface {
	UpdateIPPool(pool *networkv1alpha1.IPPool) error
	GetIPPoolStats(pool *networkv1alpha1.IPPool) (*networkv1alpha1.IPPool, error)
	SyncStatus(stopCh <-chan struct{}, q workqueue.RateLimitingInterface) error
	UpdateNamespace(ns *corev1.Namespace, pools []string) error
	Type() string
	Default(obj runtime.Object) error
}
@@ -52,6 +54,10 @@ func (p provider) Default(obj runtime.Object) error {
	return nil
}

func (p provider) UpdateNamespace(ns *corev1.Namespace, pools []string) error {
	return nil
}

func (p provider) DeleteIPPool(pool *networkv1alpha1.IPPool) (bool, error) {
	blocks, err := p.ipamclient.ListBlocks(pool.Name)
	if err != nil {
@@ -110,7 +116,7 @@ func newProvider(clientset kubesphereclient.Interface) provider {
	}
}

func NewProvider(podInformer v1.PodInformer, clientset kubesphereclient.Interface, client clientset.Interface, pt string, k8sOptions *k8s.KubernetesOptions) Provider {
func NewProvider(k8sInformer k8sinformers.SharedInformerFactory, clientset kubesphereclient.Interface, client clientset.Interface, pt string, k8sOptions *k8s.KubernetesOptions) Provider {
	var p Provider

	switch pt {
@@ -120,7 +126,7 @@ func NewProvider(podInformer v1.PodInformer, clientset kubesphereclient.Interfac
		ipamclient: ipam.NewIPAMClient(clientset, networkv1alpha1.VLAN),
	}
	case networkv1alpha1.IPPoolTypeCalico:
		p = calicoclient.NewProvider(podInformer, clientset, client, k8sOptions)
		p = calicoclient.NewProvider(k8sInformer, clientset, client, k8sOptions)
	}

	return p
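The constructor acts as a small factory, switching on the configured pool type and returning whichever implementation matches (or nil for unknown types). A simplified sketch of that dispatch pattern follows; the interface and concrete types are stand-ins for the real `Provider`, VLAN, and Calico implementations.

```go
package main

import "fmt"

// Provider is a simplified stand-in for the ippool Provider interface.
type Provider interface{ Type() string }

type vlanProvider struct{}

func (vlanProvider) Type() string { return "vlan" }

type calicoProvider struct{}

func (calicoProvider) Type() string { return "calico" }

// NewProvider picks an implementation from the configured pool type and
// returns nil for types it does not recognize, like the real constructor.
func NewProvider(poolType string) Provider {
	var p Provider
	switch poolType {
	case "vlan":
		p = vlanProvider{}
	case "calico":
		p = calicoProvider{}
	}
	return p
}

func main() {
	if p := NewProvider("calico"); p != nil {
		fmt.Println("using provider:", p.Type())
	}
}
```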
60
staging/README.md
Normal file
@@ -0,0 +1,60 @@
# External Repository Staging Area

This directory is the staging area for packages that have been split into their
own repositories. The content here is periodically published to the respective
top-level kubesphere.io repositories.

Repositories currently staged here:

- [`kubesphere.io/client-go`](https://github.com/kubesphere/client-go)


The code in the staging/ directory is authoritative, i.e. it is the only copy
of the code. You can modify such code directly.

## Using staged repositories from KubeSphere code

KubeSphere code uses the repositories in this directory via symlinks in the
`vendor/kubesphere.io` directory into this staging area. For example, when
KubeSphere code imports a package from the `kubesphere.io/client-go` repository, that
import is resolved to `staging/src/kubesphere.io/client-go` relative to the project
root:

```go
// pkg/example/some_code.go
package example

import (
	"kubesphere.io/client-go" // resolves to staging/src/kubesphere.io/client-go
)
```
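One quick way to confirm that an import really resolves through the staging area is to inspect the vendor symlink itself. A small sketch, assuming the repository root as the working directory and that the vendor entry is indeed a symlink:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// The vendor entry is expected to be a symlink back into staging/.
	target, err := os.Readlink("vendor/kubesphere.io/client-go")
	if err != nil {
		fmt.Println("not a symlink (or missing):", err)
		return
	}
	// e.g. ../../staging/src/kubesphere.io/client-go
	fmt.Println("vendor/kubesphere.io/client-go ->", target)
}
```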
Once the change-over to external repositories is complete, these repositories
will actually be vendored from `kubesphere.io/<package-name>`.

## Creating a new repository in staging

### Adding the staging repository in `kubesphere/kubesphere`:

1. Submit a proposal to sig-architecture in [community](https://github.com/kubesphere/community/) and wait for approval to create the staging repository.

2. Once approval has been granted, create the new staging repository.

3. Add a symlink to the staging repo in `vendor/kubesphere.io`.

4. Add all mandatory template files to the staging repo, such as README.md, LICENSE, OWNERS, and CONTRIBUTING.md.


### Creating the published repository

1. Create a repository in the KubeSphere org. The published repository **must** have an
initial empty commit.

2. Set up branch protection and enable access for the `ks-publishing-bot` bot.

3. Once the repository has been created in the KubeSphere org, update the publishing-bot to publish the staging repository by updating:

   - [`rules.yaml`](/staging/publishing/rules.yaml):
     Make sure that the list of dependencies reflects the staging repos in the `Godeps.json` file.

4. Add the repo to the list of staging repos in this `README.md` file.
21
staging/publishing/rules.yaml
Normal file
@@ -0,0 +1,21 @@
recursive-delete-patterns:
- BUILD
default-go-version: 1.13.15
rules:
- destination: client-go
  library: true
  branches:
  - source:
      branch: master
      dir: staging/src/kubesphere.io/client-go
    name: master
#  - source:
#      branch: release-3.1
#      dir: staging/src/kubesphere.io/client-go
#    name: release-3.1
#    go: 1.13.15

  smoke-test: |
    # assumes GO111MODULE=on
    go build ./...
    go test ./...
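The smoke test is simply a module-mode build-and-test pass over the published repository. For anyone scripting the bot locally, an equivalent driver might look like the following sketch; the two commands are the same as in the YAML, everything else is illustrative:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// run executes a command with GO111MODULE=on, mirroring the smoke-test block.
func run(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	for _, args := range [][]string{{"go", "build", "./..."}, {"go", "test", "./..."}} {
		if err := run(args[0], args[1:]...); err != nil {
			fmt.Println("smoke test failed:", err)
			os.Exit(1)
		}
	}
}
```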
2
vendor/github.com/projectcalico/libcalico-go/lib/apis/v3/globalnetworkpolicy.go
generated
vendored
@@ -90,7 +90,7 @@ type GlobalNetworkPolicySpec struct {
	// type in {"frontend", "backend"}
	// deployment != "dev"
	// ! has(label_name)
	Selector string `json:"selector,omitempty" validate:"selector"`
	Selector string `json:"selector" validate:"selector"`
	// Types indicates whether this policy applies to ingress, or to egress, or to both. When
	// not explicitly specified (and so the value on creation is empty or nil), Calico defaults
	// Types according to what Ingress and Egress rules are present in the policy. The
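The only change in this vendored file is dropping `omitempty`, which makes an empty selector serialize as an explicit empty string instead of being omitted from the JSON. A minimal illustration of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmit struct {
	Selector string `json:"selector,omitempty"`
}

type withoutOmit struct {
	Selector string `json:"selector"`
}

func main() {
	a, _ := json.Marshal(withOmit{})    // {}
	b, _ := json.Marshal(withoutOmit{}) // {"selector":""}
	fmt.Println(string(a), string(b))
}
```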