add kubectl/kubeconfig/quota/terminal API; the quota API is temporary and will be changed soon
This commit is contained in:
231
pkg/models/jobs/cronjobs/resource-quota.go
Normal file
231
pkg/models/jobs/cronjobs/resource-quota.go
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cronjobs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models/jobs/resources"
|
||||
)
|
||||
|
||||
// Object-count quota resource names, as accepted by the Kubernetes
// ResourceQuota API; these are also the keys of resourceMap below.
const (
	pods                   = "count/pods"
	daemonsets             = "count/daemonsets.apps"
	deployments            = "count/deployments.apps"
	ingress                = "count/ingresses.extensions"
	roles                  = "count/roles.rbac.authorization.k8s.io"
	services               = "count/services"
	statefulsets           = "count/statefulsets.apps"
	persistentvolumeclaims = "persistentvolumeclaims"
)
|
||||
|
||||
// resourceUsage is one per-namespace quota snapshot published on the result
// channel. NameSpace set to the literal two-character string `""` marks the
// cluster-wide aggregate (see workOnce).
type resourceUsage struct {
	NameSpace       string
	Data            v1.ResourceQuotaStatus
	UpdateTimeStamp int64 // Unix seconds when the snapshot was taken
}
|
||||
|
||||
// resourceQuotaWorker periodically computes resource-quota usage for every
// namespace and publishes resourceUsage values on resChan until stopChan is
// closed.
type resourceQuotaWorker struct {
	k8sClient *kubernetes.Clientset
	resChan   chan dataType // carries resourceUsage values
	stopChan  chan struct{} // closed to stop the worker
}
|
||||
|
||||
// namespace implements the dataType interface; the returned value becomes
// the trailing etcd key segment when the snapshot is persisted.
func (ru resourceUsage) namespace() string {
	return ru.NameSpace
}
|
||||
|
||||
// workloadList maps a namespace to the workload objects of one resource kind.
type workloadList map[string][]resources.WorkLoadObject
|
||||
|
||||
// otherResourceList maps a namespace to the non-workload objects of one resource kind.
type otherResourceList map[string][]resources.OtherResourceObject
|
||||
|
||||
// workload is the etcd-cached status of one workload resource kind
// (deployments, daemonsets, statefulsets), keyed by namespace.
type workload struct {
	ResourceType    string       `json:"type"`
	ResourceList    workloadList `json:"lists"`
	UpdateTimeStamp int64        `json:"updateTimestamp"`
}
|
||||
|
||||
// otherResource is the etcd-cached status of one non-workload resource kind
// (services, roles, ingresses, ...), keyed by namespace.
type otherResource struct {
	ResourceType    string            `json:"type"`
	ResourceList    otherResourceList `json:"lists"`
	UpdateTimeStamp int64             `json:"updateTimestamp"`
}
|
||||
|
||||
// workLoads are the resource kinds cached in etcd with the workload schema.
var workLoads = []string{"deployments", "daemonsets", "statefulsets"}
|
||||
|
||||
// resourceMap maps each object-count quota resource name to the plural name
// used as the etcd cache key segment (constants.Root + "/" + value).
var resourceMap = map[string]string{daemonsets: "daemonsets", deployments: "deployments", ingress: "ingresses",
	roles: "roles", services: "services", statefulsets: "statefulsets", persistentvolumeclaims: "persistent-volume-claim", pods: "pods"}
|
||||
|
||||
// contain reports whether item is present in items.
//
// NOTE(review): the original implementation returned the inverted result
// (false when the item was found, true when it was absent), which made
// getResourceusage decode workload kinds with the otherResource schema and
// vice versa. Fixed to match the function's name.
func contain(items []string, item string) bool {
	for _, v := range items {
		if v == item {
			return true
		}
	}
	return false
}
|
||||
|
||||
func (rw *resourceQuotaWorker) getResourceusage(namespace, resourceName string) (int, error) {
|
||||
|
||||
etcdcli, err := client.NewEtcdClient()
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
defer etcdcli.Close()
|
||||
key := constants.Root + "/" + resourceName
|
||||
value, err := etcdcli.Get(key)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
}
|
||||
|
||||
if contain(workLoads, resourceName) {
|
||||
resourceStatus := workload{ResourceList: make(workloadList)}
|
||||
|
||||
err := json.Unmarshal(value, &resourceStatus)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return len(resourceStatus.ResourceList[namespace]), nil
|
||||
} else {
|
||||
resourceStatus := otherResource{ResourceList: make(otherResourceList)}
|
||||
|
||||
err := json.Unmarshal(value, &resourceStatus)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(resourceStatus.ResourceList[namespace]), nil
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (rw *resourceQuotaWorker) updateNamespaceQuota(tmpResourceList, resourceList v1.ResourceList) {
|
||||
if tmpResourceList == nil {
|
||||
tmpResourceList = resourceList
|
||||
}
|
||||
for resource, usage := range resourceList {
|
||||
tmpUsage, exist := tmpResourceList[resource]
|
||||
if !exist {
|
||||
tmpResourceList[resource] = usage
|
||||
}
|
||||
if tmpUsage.Cmp(usage) == 1 {
|
||||
tmpResourceList[resource] = usage
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (rw *resourceQuotaWorker) getNamespaceResourceUsageByQuota(namespace string) (*v1.ResourceQuotaStatus, error) {
|
||||
quotaList, err := rw.k8sClient.CoreV1().ResourceQuotas(namespace).List(meta_v1.ListOptions{})
|
||||
if err != nil || len(quotaList.Items) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
quotaStatus := v1.ResourceQuotaStatus{Hard: make(v1.ResourceList), Used: make(v1.ResourceList)}
|
||||
|
||||
for _, quota := range quotaList.Items {
|
||||
rw.updateNamespaceQuota(quotaStatus.Hard, quota.Status.Hard)
|
||||
rw.updateNamespaceQuota(quotaStatus.Used, quota.Status.Used)
|
||||
}
|
||||
|
||||
return "aStatus, nil
|
||||
}
|
||||
|
||||
// getNamespaceQuota returns the namespace's quota status. Counts that no
// ResourceQuota object tracks are filled in from the etcd cache via
// getResourceusage, so Used ends up covering every kind in resourceMap
// (except kinds whose cache lookup failed, which are skipped best-effort).
func (rw *resourceQuotaWorker) getNamespaceQuota(namespace string) (v1.ResourceQuotaStatus, error) {
	quota, err := rw.getNamespaceResourceUsageByQuota(namespace)
	if err != nil {
		return v1.ResourceQuotaStatus{}, err
	}

	// nil means the namespace has no ResourceQuota objects at all; start
	// from an empty status and fill Used purely from the cache.
	if quota == nil {
		quota = new(v1.ResourceQuotaStatus)
		quota.Used = make(v1.ResourceList)
	}

	// k is the quota resource name (e.g. "count/pods"), v the etcd cache
	// key segment (e.g. "pods").
	for k, v := range resourceMap {
		if _, exist := quota.Used[v1.ResourceName(k)]; !exist {
			used, err := rw.getResourceusage(namespace, v)
			if err != nil {
				// Best effort: leave the entry missing on cache errors.
				continue
			}

			var quantity resource.Quantity
			quantity.Set(int64(used))
			quota.Used[v1.ResourceName(k)] = quantity
		}
	}

	return *quota, nil
}
|
||||
|
||||
// workOnce performs one collection round: it publishes a resourceUsage for
// every namespace on resChan, then a cluster-wide aggregate (the sum of all
// Used quantities plus a "count/namespaces" entry) under the sentinel
// namespace `""` (the literal two-character string).
func (rw *resourceQuotaWorker) workOnce() {
	clusterQuota := new(v1.ResourceQuotaStatus)
	clusterQuota.Used = make(v1.ResourceList)
	namespaces, err := rw.k8sClient.CoreV1().Namespaces().List(meta_v1.ListOptions{})
	if err != nil {
		glog.Error(err)
		return
	}

	for _, ns := range namespaces.Items {
		namespace := ns.Name
		nsquota, err := rw.getNamespaceQuota(namespace)
		if err != nil {
			// NOTE(review): one failing namespace aborts the whole round —
			// nothing further (including the cluster aggregate) is
			// published. Confirm this is intended rather than `continue`.
			glog.Error(err)
			return
		}
		res := resourceUsage{NameSpace: namespace, Data: nsquota, UpdateTimeStamp: time.Now().Unix()}
		rw.resChan <- res

		// Fold this namespace's usage into the cluster total. Quantity's
		// Add mutates the receiver, hence the copy-modify-store dance.
		for k, v := range nsquota.Used {
			tmp := clusterQuota.Used[k]
			tmp.Add(v)
			clusterQuota.Used[k] = tmp
		}
	}

	var quantity resource.Quantity
	quantity.Set(int64(len(namespaces.Items)))

	clusterQuota.Used["count/namespaces"] = quantity
	// Cluster-wide record, keyed by the literal string `""`.
	res := resourceUsage{NameSpace: "\"\"", Data: *clusterQuota, UpdateTimeStamp: time.Now().Unix()}
	rw.resChan <- res

}
|
||||
|
||||
// chanStop returns the channel whose closure stops this worker (Worker interface).
func (rw *resourceQuotaWorker) chanStop() chan struct{} {
	return rw.stopChan
}
|
||||
|
||||
// chanRes returns the channel on which this worker publishes results (Worker interface).
func (rw *resourceQuotaWorker) chanRes() chan dataType {
	return rw.resChan
}
|
||||
136
pkg/models/jobs/cronjobs/run.go
Normal file
136
pkg/models/jobs/cronjobs/run.go
Normal file
@@ -0,0 +1,136 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cronjobs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
// etcdClient is the shared etcd connection used to persist worker results;
// it is initialized in Run.
var etcdClient *client.EtcdClient
|
||||
|
||||
// stopChan is shared by all workers; closing it stops every worker goroutine.
var stopChan = make(chan struct{})
|
||||
|
||||
// dataType is any worker result that knows which namespace it belongs to;
// the namespace becomes the trailing segment of the etcd key it is stored under.
type dataType interface {
	namespace() string
}
|
||||
|
||||
// Worker is a periodically running collector. workOnce performs one
// collection round, chanRes exposes the results it produces, and chanStop
// exposes the channel whose closure terminates it.
type Worker interface {
	workOnce()
	chanRes() chan dataType
	chanStop() chan struct{}
}
|
||||
|
||||
func registerWorker(workers map[string]Worker, name string) {
|
||||
|
||||
glog.Infof("Register cronjob: %s", name)
|
||||
k8sClient := client.NewK8sClient()
|
||||
switch name {
|
||||
case constants.WorkloadStatusKey:
|
||||
worker := workloadWorker{k8sClient: k8sClient, stopChan: stopChan, resChan: make(chan dataType, 10)}
|
||||
workers[constants.WorkloadStatusKey] = &worker
|
||||
case constants.QuotaKey:
|
||||
worker := resourceQuotaWorker{k8sClient: k8sClient, stopChan: stopChan, resChan: make(chan dataType, 10)}
|
||||
workers[constants.QuotaKey] = &worker
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func run(worker Worker) {
|
||||
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
glog.Error(err)
|
||||
close(worker.chanRes())
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-worker.chanStop():
|
||||
return
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
worker.workOnce()
|
||||
time.Sleep(time.Duration(constants.UpdateCircle) * time.Second)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func startWorks(workers map[string]Worker) {
|
||||
for wokername, woker := range workers {
|
||||
glog.Infof("cronjob %s start to work", wokername)
|
||||
go run(woker)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func receiveResourceStatus(workers map[string]Worker) {
|
||||
defer func() {
|
||||
close(stopChan)
|
||||
}()
|
||||
|
||||
for {
|
||||
for name, worker := range workers {
|
||||
select {
|
||||
case res, ok := <-worker.chanRes():
|
||||
if !ok {
|
||||
glog.Errorf("cronjob:%s have stopped", name)
|
||||
registerWorker(workers, name)
|
||||
run(workers[name])
|
||||
} else {
|
||||
value, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
continue
|
||||
}
|
||||
key := constants.Root + "/" + name + "/" + res.namespace()
|
||||
err = etcdClient.Put(key, string(value))
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Run() {
|
||||
glog.Info("Begin to run cronjob")
|
||||
var err error
|
||||
etcdClient, err = client.NewEtcdClient()
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
}
|
||||
defer etcdClient.Close()
|
||||
workers := make(map[string]Worker)
|
||||
workerList := []string{constants.QuotaKey, constants.WorkloadStatusKey}
|
||||
for _, name := range workerList {
|
||||
registerWorker(workers, name)
|
||||
}
|
||||
startWorks(workers)
|
||||
receiveResourceStatus(workers)
|
||||
}
|
||||
104
pkg/models/jobs/cronjobs/workload-status.go
Normal file
104
pkg/models/jobs/cronjobs/workload-status.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package cronjobs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
// workLoadList enumerates the workload kinds whose readiness is tracked.
// NOTE(review): duplicates workLoads in resource-quota.go (same package);
// consider consolidating.
var workLoadList = []string{"deployments", "daemonsets", "statefulsets"}
|
||||
|
||||
// workLoadStatus carries the per-namespace count of not-ready workloads,
// keyed by kind. NameSpace set to the literal string `""` marks the
// cluster-wide aggregate.
type workLoadStatus struct {
	NameSpace       string
	Data            map[string]int
	UpdateTimeStamp int64 // Unix seconds when the snapshot was taken
}
|
||||
|
||||
// namespace implements the dataType interface.
func (ws workLoadStatus) namespace() string {
	return ws.NameSpace
}
|
||||
|
||||
// workloadWorker periodically counts not-ready workloads per namespace and
// publishes workLoadStatus values on resChan until stopChan is closed.
type workloadWorker struct {
	k8sClient *kubernetes.Clientset
	resChan   chan dataType // carries workLoadStatus values
	stopChan  chan struct{} // closed to stop the worker
}
|
||||
|
||||
func (ww *workloadWorker) GetNamespacesResourceStatus(namespace string) (map[string]int, error) {
|
||||
|
||||
cli, err := client.NewEtcdClient()
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
res := make(map[string]int)
|
||||
|
||||
for _, resourceName := range workLoadList {
|
||||
key := constants.Root + "/" + resourceName
|
||||
value, err := cli.Get(key)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
resourceStatus := workload{ResourceList: make(workloadList)}
|
||||
|
||||
err = json.Unmarshal(value, &resourceStatus)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notReady := 0
|
||||
for _, v := range resourceStatus.ResourceList[namespace] {
|
||||
if !v.Ready {
|
||||
notReady++
|
||||
}
|
||||
}
|
||||
res[resourceName] = notReady
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (ww workloadWorker) workOnce() {
|
||||
namespaces, err := ww.k8sClient.CoreV1().Namespaces().List(meta_v1.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
}
|
||||
|
||||
resourceStatus := make(map[string]int)
|
||||
for _, item := range namespaces.Items {
|
||||
namespace := item.Name
|
||||
namespacesResourceStatus, err := ww.GetNamespacesResourceStatus(namespace)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
}
|
||||
|
||||
var ws = workLoadStatus{UpdateTimeStamp: time.Now().Unix(), Data: namespacesResourceStatus, NameSpace: namespace}
|
||||
ww.resChan <- ws
|
||||
|
||||
for k, v := range namespacesResourceStatus {
|
||||
resourceStatus[k] = v + resourceStatus[k]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var ws = workLoadStatus{UpdateTimeStamp: time.Now().Unix(), Data: resourceStatus, NameSpace: "\"\""}
|
||||
ww.resChan <- ws
|
||||
}
|
||||
|
||||
// chanRes returns the channel on which this worker publishes results (Worker interface).
func (ww workloadWorker) chanRes() chan dataType {
	return ww.resChan
}
|
||||
|
||||
// chanStop returns the channel whose closure stops this worker (Worker interface).
func (ww workloadWorker) chanStop() chan struct{} {
	return ww.stopChan
}
|
||||
Reference in New Issue
Block a user