Refactor workspace API

This commit is contained in:
jeff
2018-11-06 14:48:15 +08:00
committed by hongming
parent c5b11300a1
commit e9038e94d7
19 changed files with 3171 additions and 1683 deletions

View File

@@ -30,22 +30,33 @@ func Register(ws *restful.WebService, subPath string) {
ws.Route(ws.GET(subPath).To(handleGetComponents).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/{namespace}/{componentName}").To(handleGetComponentStatus).
Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
}
//get all components
// get a specific component status
func handleGetComponentStatus(request *restful.Request, response *restful.Response) {
namespace := request.PathParameter("namespace")
componentName := request.PathParameter("componentName")
if component, err := models.GetComponentStatus(namespace, componentName); err != nil {
response.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
} else {
response.WriteAsJson(component)
}
}
// get all components
func handleGetComponents(request *restful.Request, response *restful.Response) {
result, err := models.GetComponents()
result, err := models.GetAllComponentsStatus()
if err != nil {
response.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
} else {
response.WriteAsJson(result)
}
}

View File

@@ -1,9 +1,12 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13,8 +16,6 @@ limitations under the License.
package monitoring
import (
"strings"
"github.com/emicklei/go-restful"
"github.com/emicklei/go-restful-openapi"
@@ -23,94 +24,193 @@ import (
"kubesphere.io/kubesphere/pkg/models/metrics"
)
func (u MonitorResource) monitorPod(request *restful.Request, response *restful.Response) {
podName := strings.Trim(request.PathParameter("pod_name"), " ")
func (u Monitor) monitorPod(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
podName := requestParams.PodName
metricName := requestParams.MetricsName
if podName != "" {
// single pod single metric
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
res := metrics.MonitorPodSingleMetric(request, metricsName)
queryType, params, nullRule := metrics.AssemblePodMetricRequestInfo(requestParams, metricName)
var res *metrics.FormatedMetric
if !nullRule {
res = metrics.GetMetric(queryType, params, metricName)
}
response.WriteAsJson(res)
} else {
// multiple pod multiple metric
res := metrics.MonitorAllMetrics(request)
response.WriteAsJson(res)
// multiple
rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelPod)
// sorting
sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelPodName)
// paging
pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount)
response.WriteAsJson(pagedMetrics)
}
}
func (u MonitorResource) monitorContainer(request *restful.Request, response *restful.Response) {
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
promql := metrics.MakeContainerPromQL(request)
res := client.SendPrometheusRequest(request, promql)
cleanedJson := metrics.ReformatJson(res, metricsName)
response.WriteAsJson(cleanedJson)
func (u Monitor) monitorContainer(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
res := metrics.MonitorContainer(requestParams)
response.WriteAsJson(res)
}
func (u MonitorResource) monitorWorkload(request *restful.Request, response *restful.Response) {
wlKind := request.PathParameter("workload_kind")
if strings.Trim(wlKind, " ") == "" {
func (u Monitor) monitorWorkload(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
wlKind := requestParams.WorkloadKind
if wlKind == "" {
// count all workloads figure
//metricName := "workload_count"
res := metrics.MonitorWorkloadCount(request)
res := metrics.MonitorWorkloadCount(requestParams.NsName)
response.WriteAsJson(res)
} else {
res := metrics.MonitorAllMetrics(request)
res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkload)
response.WriteAsJson(res)
}
}
// merge multiple metric: all-devops, all-roles, all-projects...this api is designed for admin
func (u MonitorResource) monitorWorkspaceUserInfo(request *restful.Request, response *restful.Response) {
res := metrics.MonitorWorkspaceUserInfo(request)
response.WriteAsJson(res)
func (u Monitor) monitorAllWorkspaces(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
if requestParams.Tp == "_statistics" {
// merge multiple metric: all-devops, all-roles, all-projects...this api is designed for admin
res := metrics.MonitorAllWorkspacesStatistics()
response.WriteAsJson(res)
} else {
rawMetrics := metrics.MonitorAllWorkspaces(requestParams)
// sorting
sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelWorkspace)
// paging
pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount)
response.WriteAsJson(pagedMetrics)
}
}
// merge multiple metric: devops, roles, projects...
func (u MonitorResource) monitorWorkspaceResourceLevelMetrics(request *restful.Request, response *restful.Response) {
res := metrics.MonitorWorkspaceResourceLevelMetrics(request)
response.WriteAsJson(res)
func (u Monitor) monitorOneWorkspace(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
tp := requestParams.Tp
if tp == "rank" {
// multiple
rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace)
// sorting
sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace)
// paging
pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount)
response.WriteAsJson(pagedMetrics)
} else if tp == "_statistics" {
wsName := requestParams.WsName
// merge multiple metric: devops, roles, projects...
res := metrics.MonitorOneWorkspaceStatistics(wsName)
response.WriteAsJson(res)
} else {
res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelWorkspace)
response.WriteAsJson(res)
}
}
func (u MonitorResource) monitorWorkspacePodLevelMetrics(request *restful.Request, response *restful.Response) {
res := metrics.MonitorAllMetrics(request)
response.WriteAsJson(res)
}
func (u MonitorResource) monitorNamespace(request *restful.Request, response *restful.Response) {
nsName := strings.Trim(request.PathParameter("ns_name"), " ")
func (u Monitor) monitorNamespace(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
metricName := requestParams.MetricsName
nsName := requestParams.NsName
if nsName != "" {
// single
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
res := metrics.MonitorNamespaceSingleMetric(request, metricsName)
queryType, params := metrics.AssembleNamespaceMetricRequestInfo(requestParams, metricName)
res := metrics.GetMetric(queryType, params, metricName)
response.WriteAsJson(res)
} else {
// multiple
res := metrics.MonitorAllMetrics(request)
response.WriteAsJson(res)
rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNamespace)
// sorting
sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNamespace)
// paging
pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount)
response.WriteAsJson(pagedMetrics)
}
}
func (u MonitorResource) monitorNodeorCluster(request *restful.Request, response *restful.Response) {
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
//var res *metrics.FormatedMetric
if metricsName != "" {
func (u Monitor) monitorCluster(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
metricName := requestParams.MetricsName
if metricName != "" {
// single
res := metrics.MonitorNodeorClusterSingleMetric(request, metricsName)
queryType, params := metrics.AssembleClusterMetricRequestInfo(requestParams, metricName)
res := metrics.GetMetric(queryType, params, metricName)
if metricName == metrics.MetricNameWorkspaceAllProjectCount {
res = metrics.MonitorWorkspaceNamespaceHistory(res)
}
response.WriteAsJson(res)
} else {
// multiple
res := metrics.MonitorAllMetrics(request)
res := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelCluster)
response.WriteAsJson(res)
}
}
type MonitorResource struct {
func (u Monitor) monitorNode(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
metricName := requestParams.MetricsName
if metricName != "" {
// single
queryType, params := metrics.AssembleNodeMetricRequestInfo(requestParams, metricName)
res := metrics.GetMetric(queryType, params, metricName)
nodeAddress := metrics.GetNodeAddressInfo()
metrics.AddNodeAddressMetric(res, nodeAddress)
response.WriteAsJson(res)
} else {
// multiple
rawMetrics := metrics.MonitorAllMetrics(requestParams, metrics.MetricLevelNode)
nodeAddress := metrics.GetNodeAddressInfo()
for i := 0; i < len(rawMetrics.Results); i++ {
metrics.AddNodeAddressMetric(&rawMetrics.Results[i], nodeAddress)
}
// sorting
sortedMetrics, maxMetricCount := metrics.Sort(requestParams.SortMetricName, requestParams.SortType, rawMetrics, metrics.MetricLevelNode)
// paging
pagedMetrics := metrics.Page(requestParams.PageNum, requestParams.LimitNum, sortedMetrics, maxMetricCount)
response.WriteAsJson(pagedMetrics)
}
}
// k8s component(controller, scheduler, etcd) status
func (u Monitor) monitorComponentStatus(request *restful.Request, response *restful.Response) {
requestParams := client.ParseMonitoringRequestParams(request)
status := metrics.MonitorComponentStatus(requestParams)
response.WriteAsJson(status)
}
func (u Monitor) monitorEvents(request *restful.Request, response *restful.Response) {
// k8s component healthy status
requestParams := client.ParseMonitoringRequestParams(request)
nsFilter := requestParams.NsFilter
events := metrics.MonitorEvents(nsFilter)
response.WriteAsJson(events)
}
type Monitor struct {
}
func Register(ws *restful.WebService, subPath string) {
tags := []string{"monitoring apis"}
u := MonitorResource{}
u := Monitor{}
ws.Route(ws.GET(subPath+"/clusters").To(u.monitorNodeorCluster).
ws.Route(ws.GET(subPath+"/clusters").To(u.monitorCluster).
Filter(route.RouteLogging).
Doc("monitor cluster level metrics").
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("cluster_cpu_utilisation")).
@@ -118,16 +218,20 @@ func Register(ws *restful.WebService, subPath string) {
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/nodes").To(u.monitorNodeorCluster).
ws.Route(ws.GET(subPath+"/nodes").To(u.monitorNode).
Filter(route.RouteLogging).
Doc("monitor nodes level metrics").
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("node_cpu_utilisation")).
Param(ws.QueryParameter("nodes_filter", "node re2 expression filter").DataType("string").Required(false).DefaultValue("")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/nodes/{node_id}").To(u.monitorNodeorCluster).
ws.Route(ws.GET(subPath+"/nodes/{node_id}").To(u.monitorNode).
Filter(route.RouteLogging).
Doc("monitor specific node level metrics").
Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true).DefaultValue("")).
@@ -141,6 +245,10 @@ func Register(ws *restful.WebService, subPath string) {
Doc("monitor namespaces level metrics").
Param(ws.QueryParameter("namespaces_filter", "namespaces re2 expression filter").DataType("string").Required(false).DefaultValue("")).
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("namespace_memory_utilisation")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
@@ -158,8 +266,12 @@ func Register(ws *restful.WebService, subPath string) {
Filter(route.RouteLogging).
Doc("monitor pods level metrics").
Param(ws.PathParameter("ns_name", "specific namespace").DataType("string").Required(true).DefaultValue("monitoring")).
Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("")).
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")).
Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
@@ -178,8 +290,12 @@ func Register(ws *restful.WebService, subPath string) {
Filter(route.RouteLogging).
Doc("monitor pods level metrics by nodeid").
Param(ws.PathParameter("node_id", "specific node").DataType("string").Required(true).DefaultValue("i-k89a62il")).
Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("openpitrix.*")).
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("pod_memory_utilisation_wo_cache")).
Param(ws.QueryParameter("pods_filter", "pod re2 expression filter").DataType("string").Required(false).DefaultValue("openpitrix.*")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
@@ -236,27 +352,47 @@ func Register(ws *restful.WebService, subPath string) {
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/workspaces/{workspace_name}/pods").To(u.monitorWorkspacePodLevelMetrics).
// list all namespace in this workspace by selected metrics
ws.Route(ws.GET(subPath+"/workspaces/{workspace_name}").To(u.monitorOneWorkspace).
Filter(route.RouteLogging).
Doc("monitor specific workspace level metrics").
Doc("monitor workspaces level metrics").
Param(ws.PathParameter("workspace_name", "workspace name").DataType("string").Required(true)).
Param(ws.QueryParameter("namespaces_filter", "namespaces filter").DataType("string").Required(false).DefaultValue("k.*")).
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...").DataType("string").Required(false).DefaultValue("tenant_memory_utilisation_wo_cache")).
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("namespace_memory_utilisation_wo_cache")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/workspaces/{workspace_name}").To(u.monitorWorkspaceResourceLevelMetrics).
ws.Route(ws.GET(subPath+"/workspaces").To(u.monitorAllWorkspaces).
Filter(route.RouteLogging).
Doc("monitor specific workspace level metrics").
Param(ws.PathParameter("workspace_name", "workspace name").DataType("string").Required(true)).
Doc("monitor workspaces level metrics").
Param(ws.QueryParameter("metrics_filter", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("workspace_memory_utilisation")).
Param(ws.QueryParameter("workspaces_filter", "workspaces re2 expression filter").DataType("string").Required(false).DefaultValue(".*")).
Param(ws.QueryParameter("sort_metric", "sort metric").DataType("string").Required(false)).
Param(ws.QueryParameter("sort_type", "ascending descending order").DataType("string").Required(false)).
Param(ws.QueryParameter("page", "page number").DataType("string").Required(false).DefaultValue("1")).
Param(ws.QueryParameter("limit", "metrics name cpu memory...in re2 regex").DataType("string").Required(false).DefaultValue("4")).
Param(ws.QueryParameter("type", "rank, statistic").DataType("string").Required(false).DefaultValue("rank")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/workspaces").To(u.monitorWorkspaceUserInfo).
ws.Route(ws.GET(subPath+"/events").To(u.monitorEvents).
Filter(route.RouteLogging).
Doc("monitor specific workspace level metrics").
Doc("monitor k8s events").
Param(ws.QueryParameter("namespaces_filter", "namespaces filter").DataType("string").Required(false).DefaultValue(".*")).
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/components").To(u.monitorComponentStatus).
Filter(route.RouteLogging).
Doc("monitor k8s components status").
Metadata(restfulspec.KeyOpenAPITags, tags)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)

View File

@@ -11,8 +11,13 @@ import (
"k8s.io/kubernetes/pkg/util/slice"
"strconv"
"regexp"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/iam"
"kubesphere.io/kubesphere/pkg/models/metrics"
"kubesphere.io/kubesphere/pkg/models/workspaces"
)
@@ -26,9 +31,13 @@ func Register(ws *restful.WebService, subPath string) {
ws.Route(ws.GET(subPath + "/{name}").To(WorkspaceDetailHandler))
ws.Route(ws.PUT(subPath + "/{name}").To(WorkspaceEditHandler))
ws.Route(ws.GET(subPath + "/{workspace}/namespaces").To(UserNamespaceListHandler))
ws.Route(ws.GET(subPath + "/{workspace}/members/{username}/namespaces").To(UserNamespaceListHandler))
ws.Route(ws.POST(subPath + "/{name}/namespaces").To(NamespaceCreateHandler))
ws.Route(ws.DELETE(subPath + "/{name}/namespaces/{namespace}").To(NamespaceDeleteHandler))
ws.Route(ws.GET(subPath + "/{name}/namespaces/{namespace}").To(NamespaceCheckHandler))
ws.Route(ws.GET("/namespaces/{namespace}").To(NamespaceCheckHandler))
ws.Route(ws.GET(subPath + "/{name}/devops").To(DevOpsProjectHandler))
ws.Route(ws.GET(subPath + "/{name}/members/{username}/devops").To(DevOpsProjectHandler))
ws.Route(ws.POST(subPath + "/{name}/devops").To(DevOpsProjectCreateHandler))
ws.Route(ws.DELETE(subPath + "/{name}/devops/{id}").To(DevOpsProjectDeleteHandler))
@@ -171,10 +180,22 @@ func MembersRemoveHandler(req *restful.Request, resp *restful.Response) {
resp.WriteHeaderAndEntity(http.StatusOK, constants.MessageResponse{Message: "success"})
}
func NamespaceCheckHandler(req *restful.Request, resp *restful.Response) {
namespace := req.PathParameter("namespace")
exist, err := workspaces.NamespaceExistCheck(namespace)
if err != nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
resp.WriteEntity(map[string]bool{"exist": exist})
}
func NamespaceDeleteHandler(req *restful.Request, resp *restful.Response) {
namespace := req.PathParameter("namespace")
workspace := req.PathParameter("name")
//force := req.QueryParameter("force")
err := workspaces.DeleteNamespace(workspace, namespace)
@@ -223,26 +244,14 @@ func DevOpsProjectCreateHandler(req *restful.Request, resp *restful.Response) {
return
}
project, err := workspaces.CreateDevopsProject(username, devops)
project, err := workspaces.CreateDevopsProject(username, workspace, devops)
if err != nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
if project.ProjectId == nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: "project create failed"})
} else {
err = workspaces.BindingDevopsProject(workspace, *project.ProjectId)
if err != nil {
workspaces.DeleteDevopsProject(username, *project.ProjectId)
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
resp.WriteEntity(project)
}
resp.WriteEntity(project)
}
@@ -285,15 +294,53 @@ func NamespaceCreateHandler(req *restful.Request, resp *restful.Response) {
func DevOpsProjectHandler(req *restful.Request, resp *restful.Response) {
workspace := req.PathParameter("name")
username := req.PathParameter("username")
keyword := req.QueryParameter("keyword")
devOpsProjects, err := workspaces.DevopsProjects(workspace)
if username == "" {
username = req.HeaderParameter(UserNameHeader)
}
limit := 65535
offset := 0
orderBy := "createTime"
reverse := true
if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 {
limit, _ = strconv.Atoi(groups[1])
page, _ := strconv.Atoi(groups[2])
if page < 0 {
page = 1
}
offset = (page - 1) * limit
}
if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 {
orderBy = groups[1]
reverse = false
}
if q := req.QueryParameter("reverse"); q != "" {
b, err := strconv.ParseBool(q)
if err == nil {
reverse = b
}
}
total, devOpsProjects, err := workspaces.ListDevopsProjectsByUser(username, workspace, keyword, orderBy, reverse, limit, offset)
if err != nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
resp.WriteEntity(devOpsProjects)
result := constants.PageableResponse{}
result.TotalCount = total
result.Items = make([]interface{}, 0)
for _, n := range devOpsProjects {
result.Items = append(result.Items, n)
}
resp.WriteEntity(result)
}
func WorkspaceCreateHandler(req *restful.Request, resp *restful.Response) {
@@ -402,10 +449,10 @@ func WorkspaceDetailHandler(req *restful.Request, resp *restful.Response) {
// List all workspaces for the current user
func UserWorkspaceListHandler(req *restful.Request, resp *restful.Response) {
keyword := req.QueryParameter("keyword")
username := req.HeaderParameter(UserNameHeader)
list, err := workspaces.ListByUser(username)
list, err := workspaces.ListWorkspaceByUser(username, keyword)
if err != nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
@@ -416,16 +463,62 @@ func UserWorkspaceListHandler(req *restful.Request, resp *restful.Response) {
}
func UserNamespaceListHandler(req *restful.Request, resp *restful.Response) {
withMetrics, err := strconv.ParseBool(req.QueryParameter("metrics"))
if err != nil {
withMetrics = false
}
username := req.PathParameter("username")
keyword := req.QueryParameter("keyword")
if username == "" {
username = req.HeaderParameter(UserNameHeader)
}
limit := 65535
offset := 0
orderBy := "createTime"
reverse := true
if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 {
limit, _ = strconv.Atoi(groups[1])
page, _ := strconv.Atoi(groups[2])
if page < 0 {
page = 1
}
offset = (page - 1) * limit
}
if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 {
orderBy = groups[1]
reverse = false
}
if q := req.QueryParameter("reverse"); q != "" {
b, err := strconv.ParseBool(q)
if err == nil {
reverse = b
}
}
username := req.HeaderParameter(UserNameHeader)
workspaceName := req.PathParameter("workspace")
namespaces, err := workspaces.ListNamespaceByUser(workspaceName, username)
total, namespaces, err := workspaces.ListNamespaceByUser(workspaceName, username, keyword, orderBy, reverse, limit, offset)
if withMetrics {
namespaces = metrics.GetNamespacesWithMetrics(namespaces)
}
if err != nil {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
resp.WriteEntity(namespaces)
result := constants.PageableResponse{}
result.TotalCount = total
result.Items = make([]interface{}, 0)
for _, n := range namespaces {
result.Items = append(result.Items, n)
}
resp.WriteEntity(result)
}

View File

@@ -16,13 +16,12 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
"github.com/pkg/errors"
)
const (
@@ -33,13 +32,38 @@ const (
PrometheusEndpointUrl = DefaultScheme + "://" + DefaultPrometheusService + ":" + DefaultPrometheusPort + PrometheusApiPath
DefaultQueryStep = "10m"
DefaultQueryTimeout = "30s"
RangeQueryType = "query_range?"
DefaultQueryType = "query?"
)
type MonitoringRequestParams struct {
Params url.Values
QueryType string
SortMetricName string
SortType string
PageNum string
LimitNum string
Tp string
MetricsFilter string
NodesFilter string
WsFilter string
NsFilter string
PodsFilter string
ContainersFilter string
MetricsName string
WorkloadName string
NodeId string
WsName string
NsName string
PodName string
ContainerName string
WorkloadKind string
}
var client = &http.Client{}
func SendRequest(postfix string, params string) string {
epurl := PrometheusEndpointUrl + postfix + params
//glog.Info("monitoring epurl:>", epurl)
func SendMonitoringRequest(queryType string, params string) string {
epurl := PrometheusEndpointUrl + queryType + params
response, err := client.Get(epurl)
if err != nil {
glog.Error(err)
@@ -56,34 +80,57 @@ func SendRequest(postfix string, params string) string {
return ""
}
func SendPrometheusRequest(request *restful.Request, recordingRule string) string {
paramsMap, bol, err := ParseRequestHeader(request)
if err != nil {
glog.Error(err)
return ""
}
func ParseMonitoringRequestParams(request *restful.Request) *MonitoringRequestParams {
instantTime := strings.Trim(request.QueryParameter("time"), " ")
start := strings.Trim(request.QueryParameter("start"), " ")
end := strings.Trim(request.QueryParameter("end"), " ")
step := strings.Trim(request.QueryParameter("step"), " ")
timeout := strings.Trim(request.QueryParameter("timeout"), " ")
var res = ""
var postfix = ""
if bol {
// range query
postfix = "query_range?"
} else {
// query
postfix = "query?"
}
paramsMap.Set("query", recordingRule)
params := paramsMap.Encode()
res = SendRequest(postfix, params)
return res
}
sortMetricName := strings.Trim(request.QueryParameter("sort_metric"), " ")
sortType := strings.Trim(request.QueryParameter("sort_type"), " ")
pageNum := strings.Trim(request.QueryParameter("page"), " ")
limitNum := strings.Trim(request.QueryParameter("limit"), " ")
tp := strings.Trim(request.QueryParameter("type"), " ")
func ParseRequestHeader(request *restful.Request) (url.Values, bool, error) {
instantTime := request.QueryParameter("time")
start := request.QueryParameter("start")
end := request.QueryParameter("end")
step := request.QueryParameter("step")
timeout := request.QueryParameter("timeout")
metricsFilter := strings.Trim(request.QueryParameter("metrics_filter"), " ")
nodesFilter := strings.Trim(request.QueryParameter("nodes_filter"), " ")
wsFilter := strings.Trim(request.QueryParameter("workspaces_filter"), " ")
nsFilter := strings.Trim(request.QueryParameter("namespaces_filter"), " ")
podsFilter := strings.Trim(request.QueryParameter("pods_filter"), " ")
containersFilter := strings.Trim(request.QueryParameter("containers_filter"), " ")
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
workloadName := strings.Trim(request.QueryParameter("workload_name"), " ")
nodeId := strings.Trim(request.PathParameter("node_id"), " ")
wsName := strings.Trim(request.PathParameter("workspace_name"), " ")
nsName := strings.Trim(request.PathParameter("ns_name"), " ")
podName := strings.Trim(request.PathParameter("pod_name"), " ")
containerName := strings.Trim(request.PathParameter("container_name"), " ")
workloadKind := strings.Trim(request.PathParameter("workload_kind"), " ")
var requestParams = MonitoringRequestParams{
SortMetricName: sortMetricName,
SortType: sortType,
PageNum: pageNum,
LimitNum: limitNum,
Tp: tp,
MetricsFilter: metricsFilter,
NodesFilter: nodesFilter,
WsFilter: wsFilter,
NsFilter: nsFilter,
PodsFilter: podsFilter,
ContainersFilter: containersFilter,
MetricsName: metricsName,
WorkloadName: workloadName,
NodeId: nodeId,
WsName: wsName,
NsName: nsName,
PodName: podName,
ContainerName: containerName,
WorkloadKind: workloadKind,
}
if timeout == "" {
timeout = DefaultQueryTimeout
@@ -93,25 +140,35 @@ func ParseRequestHeader(request *restful.Request) (url.Values, bool, error) {
}
// Whether query or query_range request
u := url.Values{}
if start != "" && end != "" {
u.Set("start", convertTimeGranularity(start))
u.Set("end", convertTimeGranularity(end))
u.Set("step", step)
u.Set("timeout", timeout)
return u, true, nil
requestParams.QueryType = RangeQueryType
requestParams.Params = u
return &requestParams
}
if instantTime != "" {
u.Set("time", instantTime)
u.Set("timeout", timeout)
return u, false, nil
requestParams.QueryType = DefaultQueryType
requestParams.Params = u
return &requestParams
} else {
//u.Set("time", strconv.FormatInt(int64(time.Now().Unix()), 10))
u.Set("timeout", timeout)
return u, false, nil
requestParams.QueryType = DefaultQueryType
requestParams.Params = u
return &requestParams
}
glog.Errorln("Parse request %s failed", u)
return u, false, errors.Errorf("Parse request time range %s failed", u)
requestParams.QueryType = DefaultQueryType
requestParams.Params = u
return &requestParams
}
func convertTimeGranularity(ts string) string {

View File

@@ -19,150 +19,123 @@ package models
import (
"time"
"k8s.io/apimachinery/pkg/labels"
"github.com/golang/glog"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
)
type ComponentsCount struct {
KubernetesCount int `json:"kubernetesCount"`
OpenpitrixCount int `json:"openpitrixCount"`
KubesphereCount int `json:"kubesphereCount"`
// Namespaces need to watch
var SYSTEM_NAMESPACES = [...]string{"kubesphere-system", "openpitrix-system", "kube-system"}
type Component struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
SelfLink string `json:"selfLink"`
Label interface{} `json:"label"`
StartedAt time.Time `json:"startedAt"`
TotalBackends int `json:"totalBackends"`
HealthyBackends int `json:"healthyBackends"`
}
type Components struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
SelfLink string `json:"selfLink"`
Label interface{} `json:"label"`
HealthStatus string `json:"healthStatus"`
CreateTime time.Time `json:"createTime"`
}
/***
* get all components from k8s and kubesphere system
*
*/
func GetComponents() (map[string]interface{}, error) {
result := make(map[string]interface{})
componentsList := make([]Components, 0)
func GetComponentStatus(namespace string, componentName string) (interface{}, error) {
k8sClient := client.NewK8sClient()
var count ComponentsCount
var components Components
label := ""
namespaces := []string{constants.KubeSystemNamespace, constants.OpenPitrixNamespace, constants.KubeSphereNamespace}
for _, ns := range namespaces {
if service, err := k8sClient.CoreV1().Services(namespace).Get(componentName, meta_v1.GetOptions{}); err != nil {
glog.Error(err)
return nil, err
} else {
set := labels.Set(service.Spec.Selector)
if ns == constants.KubeSystemNamespace {
label = "kubernetes.io/cluster-service=true"
} else if ns == constants.OpenPitrixNamespace {
label = "app=openpitrix"
} else {
label = "app=kubesphere"
component := Component{
Name: service.Name,
Namespace: service.Namespace,
SelfLink: service.SelfLink,
Label: service.Spec.Selector,
StartedAt: service.CreationTimestamp.Time,
HealthyBackends: 0,
TotalBackends: 0,
}
option := meta_v1.ListOptions{
LabelSelector: label,
}
servicelists, err := k8sClient.CoreV1().Services(ns).List(option)
if err != nil {
if pods, err := k8sClient.CoreV1().Pods(namespace).List(meta_v1.ListOptions{LabelSelector: set.AsSelector().String()}); err != nil {
glog.Error(err)
return result, err
return nil, err
} else {
for _, v := range pods.Items {
component.TotalBackends++
component.HealthyBackends++
for _, c := range v.Status.ContainerStatuses {
if !c.Ready {
component.HealthyBackends--
break
}
}
}
}
if len(servicelists.Items) > 0 {
return component, nil
}
for _, service := range servicelists.Items {
}
switch ns {
func GetAllComponentsStatus() (map[string]interface{}, error) {
case constants.KubeSystemNamespace:
count.KubernetesCount++
case constants.OpenPitrixNamespace:
count.OpenpitrixCount++
default:
count.KubesphereCount++
}
status := make(map[string]interface{})
var err error
components.Name = service.Name
components.Namespace = service.Namespace
components.CreateTime = service.CreationTimestamp.Time
components.Label = service.Spec.Selector
components.SelfLink = service.SelfLink
label := service.Spec.Selector
combination := ""
for key, val := range label {
k8sClient := client.NewK8sClient()
labelstr := key + "=" + val
for _, ns := range SYSTEM_NAMESPACES {
if combination == "" {
nsStatus := make(map[string]interface{})
combination = labelstr
services, err := k8sClient.CoreV1().Services(ns).List(meta_v1.ListOptions{})
if err != nil {
glog.Error(err)
continue
}
} else {
for _, service := range services.Items {
combination = combination + "," + labelstr
}
}
option := meta_v1.ListOptions{
LabelSelector: combination,
}
podsList, err := k8sClient.CoreV1().Pods(ns).List(option)
if err != nil {
glog.Error(err)
return result, err
}
if len(podsList.Items) > 0 {
var health bool
for _, pod := range podsList.Items {
for _, status := range pod.Status.ContainerStatuses {
if status.Ready == false {
health = status.Ready
break
} else {
health = status.Ready
}
}
if health == false {
components.HealthStatus = "unhealth"
break
}
}
if health == true {
components.HealthStatus = "health"
}
} else {
components.HealthStatus = "unhealth"
}
componentsList = append(componentsList, components)
set := labels.Set(service.Spec.Selector)
if len(set) == 0 {
continue
}
component := Component{
Name: service.Name,
Namespace: service.Namespace,
SelfLink: service.SelfLink,
Label: service.Spec.Selector,
StartedAt: service.CreationTimestamp.Time,
HealthyBackends: 0,
TotalBackends: 0,
}
if pods, err := k8sClient.CoreV1().Pods(ns).List(meta_v1.ListOptions{LabelSelector: set.AsSelector().String()}); err != nil {
glog.Error(err)
continue
} else {
for _, v := range pods.Items {
component.TotalBackends++
component.HealthyBackends++
for _, c := range v.Status.ContainerStatuses {
if !c.Ready {
component.HealthyBackends--
break
}
}
}
}
nsStatus[service.Name] = component
}
status[ns] = nsStatus
}
result["count"] = count
result["item"] = componentsList
return result, nil
return status, err
}

View File

@@ -19,10 +19,16 @@ package controllers
import (
"time"
"fmt"
"regexp"
"github.com/golang/glog"
"github.com/pkg/errors"
rbac "k8s.io/api/rbac/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
)
func (ctl *ClusterRoleBindingCtl) Name() string {
@@ -43,10 +49,63 @@ func (ctl *ClusterRoleBindingCtl) total() int {
return len(list)
}
// workspaceRoleNameRegexp matches cluster role binding names that encode a
// workspace role, e.g. "system:my-workspace:admin". Compiled once at package
// scope so the informer callback does not recompile it on every event.
var workspaceRoleNameRegexp = regexp.MustCompile(`^system:(\w+):(admin|operator|viewer)$`)

// handleWorkspaceRoleChange reacts to a change of a workspace-level cluster
// role binding. When the binding name encodes a workspace role
// ("system:<workspace>:<role>"), it asynchronously re-syncs the role bindings
// of every namespace belonging to that workspace.
func (ctl *ClusterRoleBindingCtl) handleWorkspaceRoleChange(clusterRoleBinding *rbac.ClusterRoleBinding) {
	if groups := workspaceRoleNameRegexp.FindStringSubmatch(clusterRoleBinding.Name); len(groups) == 3 {
		workspace := groups[1]
		// Run in the background: the namespace re-sync may issue several API calls.
		go ctl.restNamespaceRoleBinding(workspace)
	}
}
// restNamespaceRoleBinding re-triggers role binding initialization for every
// namespace labeled with the given workspace. It clears the init-time
// annotation; the namespace controller treats the empty value as "not
// initialized" and recreates the default role bindings on its next sync.
func (ctl *ClusterRoleBindingCtl) restNamespaceRoleBinding(workspace string) {
	selector := labels.SelectorFromSet(labels.Set{"kubesphere.io/workspace": workspace})
	namespaces, err := ctl.K8sClient.CoreV1().Namespaces().List(meta_v1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		glog.Warning("workspace roles sync failed", workspace, err)
		return
	}
	// The patch body is loop-invariant: it only clears the init-time annotation.
	pathJson := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, initTimeAnnotateKey, "")
	for _, namespace := range namespaces.Items {
		// A failure on one namespace must not abort the sync of the rest,
		// so log and continue instead of returning early.
		if _, err := ctl.K8sClient.CoreV1().Namespaces().Patch(namespace.Name, "application/strategic-merge-patch+json", []byte(pathJson)); err != nil {
			glog.Warning("workspace roles sync failed", workspace, err)
			continue
		}
	}
}
// initListerAndInformer wires up the shared-informer machinery for cluster
// role bindings. Only updates are acted upon: when a binding's subject list
// changes (compared by Name and Kind via subjectsCompile), the workspace role
// change handler is invoked. Add and Delete events are intentionally no-ops.
func (ctl *ClusterRoleBindingCtl) initListerAndInformer() {
	// resyncCircle (seconds) is defined elsewhere in this package.
	informerFactory := informers.NewSharedInformerFactory(ctl.K8sClient, time.Second*resyncCircle)
	ctl.lister = informerFactory.Rbac().V1().ClusterRoleBindings().Lister()
	ctl.informer = informerFactory.Rbac().V1().ClusterRoleBindings().Informer()
	ctl.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
		},
		UpdateFunc: func(old, new interface{}) {
			oldValue := old.(*rbac.ClusterRoleBinding)
			newValue := new.(*rbac.ClusterRoleBinding)
			// Only react when the membership actually changed, not on every resync.
			if !subjectsCompile(oldValue.Subjects, newValue.Subjects) {
				ctl.handleWorkspaceRoleChange(newValue)
			}
		},
		DeleteFunc: func(obj interface{}) {
		},
	})
}
// subjectsCompile reports whether two subject lists are equivalent for the
// purpose of workspace role syncing: equal length, and pairwise-equal Name
// and Kind at every position. Other Subject fields are deliberately ignored.
func subjectsCompile(s1 []rbac.Subject, s2 []rbac.Subject) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := range s1 {
		if s1[i].Name != s2[i].Name || s1[i].Kind != s2[i].Kind {
			return false
		}
	}
	return true
}
func (ctl *ClusterRoleBindingCtl) CountWithConditions(conditions string) int {

View File

@@ -46,17 +46,19 @@ import (
const (
provider = "kubernetes"
admin = "admin"
editor = "editor"
editor = "operator"
viewer = "viewer"
kubectlNamespace = constants.KubeSphereControlNamespace
kubectlConfigKey = "config"
openPitrixRuntimeAnnotateKey = "openpitrix_runtime"
creatorAnnotateKey = "creator"
initTimeAnnotateKey = "kubesphere.io/init-time"
workspaceLabelKey = "kubesphere.io/workspace"
)
var adminRules = []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}
var editorRules = []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"", "apps", "extensions", "batch"}, Resources: []string{"*"}}}
var viewerRules = []rbac.PolicyRule{{Verbs: []string{"list", "get", "watch"}, APIGroups: []string{"", "apps", "extensions", "batch"}, Resources: []string{"*"}}}
var editorRules = []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"", "apps", "extensions", "batch", "kubesphere.io", "account.kubesphere.io"}, Resources: []string{"*"}}}
var viewerRules = []rbac.PolicyRule{{Verbs: []string{"list", "get", "watch"}, APIGroups: []string{"", "apps", "extensions", "batch", "kubesphere.io", "account.kubesphere.io"}, Resources: []string{"*"}}}
type runTime struct {
RuntimeId string `json:"runtime_id"`
@@ -131,15 +133,84 @@ func (ctl *NamespaceCtl) createOpRuntime(namespace string) ([]byte, error) {
return makeHttpRequest("POST", url, string(body))
}
func (ctl *NamespaceCtl) createDefaultRoleBinding(ns, user string) error {
func (ctl *NamespaceCtl) createDefaultRoleBinding(namespace *v1.Namespace) error {
roleBinding := &rbac.RoleBinding{ObjectMeta: metaV1.ObjectMeta{Name: admin, Namespace: ns},
Subjects: []rbac.Subject{{Name: user, Kind: rbac.UserKind}}, RoleRef: rbac.RoleRef{Kind: "Role", Name: admin}}
workspace := ""
creator := ""
if namespace.Annotations != nil {
creator = namespace.Annotations[creatorAnnotateKey]
}
if namespace.Labels != nil {
workspace = namespace.Labels[workspaceLabelKey]
}
_, err := ctl.K8sClient.RbacV1().RoleBindings(ns).Create(roleBinding)
adminBinding, err := ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Get(admin, metaV1.GetOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
glog.Error(err)
if err != nil {
if errors.IsNotFound(err) {
adminBinding = new(rbac.RoleBinding)
adminBinding.Name = admin
adminBinding.Namespace = namespace.Name
adminBinding.RoleRef = rbac.RoleRef{Kind: "Role", Name: admin}
} else {
return err
}
}
adminBinding.Subjects = make([]rbac.Subject, 0)
if creator != "" {
adminBinding.Subjects = append(adminBinding.Subjects, rbac.Subject{Name: creator, Kind: rbac.UserKind})
}
if workspace != "" {
workspaceAdmin, err := ctl.K8sClient.RbacV1().ClusterRoleBindings().Get(fmt.Sprintf("system:%s:admin", workspace), metaV1.GetOptions{})
if err != nil {
return err
}
adminBinding.Subjects = append(adminBinding.Subjects, workspaceAdmin.Subjects...)
}
if adminBinding.ResourceVersion == "" {
_, err = ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Create(adminBinding)
} else {
_, err = ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Update(adminBinding)
}
if err != nil {
return err
}
viewerBinding, err := ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Get(viewer, metaV1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
viewerBinding = new(rbac.RoleBinding)
viewerBinding.Name = viewer
viewerBinding.Namespace = namespace.Name
viewerBinding.RoleRef = rbac.RoleRef{Kind: "Role", Name: viewer}
} else {
return err
}
}
viewerBinding.Subjects = make([]rbac.Subject, 0)
if workspace != "" {
workspaceViewer, err := ctl.K8sClient.RbacV1().ClusterRoleBindings().Get(fmt.Sprintf("system:%s:viewer", workspace), metaV1.GetOptions{})
if err != nil {
return err
}
viewerBinding.Subjects = append(viewerBinding.Subjects, workspaceViewer.Subjects...)
}
if viewerBinding.ResourceVersion == "" {
_, err = ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Create(viewerBinding)
} else {
_, err = ctl.K8sClient.RbacV1().RoleBindings(namespace.Name).Update(viewerBinding)
}
if err != nil {
return err
}
@@ -172,60 +243,44 @@ func (ctl *NamespaceCtl) createDefaultRole(ns string) error {
return nil
}
func (ctl *NamespaceCtl) createRoleAndRuntime(item v1.Namespace) {
var creator string
var runtime string
ns := item.Name
if item.Annotations == nil {
creator = ""
runtime = ""
} else {
runtime = item.Annotations[openPitrixRuntimeAnnotateKey]
creator = item.Annotations[creatorAnnotateKey]
func (ctl *NamespaceCtl) createRoleAndRuntime(namespace *v1.Namespace) {
runtime := ""
initTime := ""
if namespace.Annotations != nil {
runtime = namespace.Annotations[openPitrixRuntimeAnnotateKey]
initTime = namespace.Annotations[initTimeAnnotateKey]
}
componentsNamespaces := []string{constants.KubeSystemNamespace, constants.OpenPitrixNamespace, constants.IstioNamespace, constants.KubeSphereNamespace}
if len(runtime) == 0 && !slice.ContainsString(componentsNamespaces, ns, nil) {
glog.Infoln("create runtime:", ns)
var runtimeCreateError error
resp, runtimeCreateError := ctl.createOpRuntime(ns)
if runtimeCreateError == nil {
var runtime runTime
runtimeCreateError = json.Unmarshal(resp, &runtime)
if runtimeCreateError == nil {
if item.Annotations == nil {
item.Annotations = make(map[string]string, 0)
}
item.Annotations[openPitrixRuntimeAnnotateKey] = runtime.RuntimeId
_, runtimeCreateError = ctl.K8sClient.CoreV1().Namespaces().Update(&item)
}
}
if runtime == "" && !slice.ContainsString(componentsNamespaces, namespace.Name, nil) {
glog.Infoln("create runtime:", namespace.Name)
_, runtimeCreateError := ctl.createOpRuntime(namespace.Name)
if runtimeCreateError != nil {
glog.Error("runtime create error:", runtimeCreateError)
}
}
if len(creator) > 0 {
roleCreateError := ctl.createDefaultRole(ns)
glog.Infoln("create default role:", ns)
if roleCreateError == nil {
roleBindingError := ctl.createDefaultRoleBinding(ns, creator)
glog.Infoln("create default role binding:", ns)
if roleBindingError != nil {
glog.Error("default role binding create error:", roleBindingError)
}
} else {
glog.Error("default role create error:", roleCreateError)
if initTime == "" {
err := ctl.createDefaultRole(namespace.Name)
glog.Infoln("create default role:", namespace.Name)
if err == nil {
err = ctl.createDefaultRoleBinding(namespace)
glog.Infoln("create default role binding:", namespace.Name)
if err != nil {
glog.Error("default role binding create error:", err)
}
} else {
glog.Error("default role create error:", err)
}
if err == nil {
pathJson := fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, initTimeAnnotateKey, time.Now().UTC().Format("2006-01-02T15:04:05Z"))
_, err = ctl.K8sClient.CoreV1().Namespaces().Patch(namespace.Name, "application/strategic-merge-patch+json", []byte(pathJson))
if err != nil {
glog.Error("annotations patch error init failed:", namespace.Name, err)
}
}
}
}
@@ -293,7 +348,7 @@ func (ctl *NamespaceCtl) createCephSecretAfterNewNs(item v1.Namespace) {
}
}
func (ctl *NamespaceCtl) generateObject(item v1.Namespace) *Namespace {
func (ctl *NamespaceCtl) generateObject(item *v1.Namespace) *Namespace {
var displayName string
if item.Annotations != nil && len(item.Annotations[DisplayName]) > 0 {
@@ -333,17 +388,17 @@ func (ctl *NamespaceCtl) sync(stopChan chan struct{}) {
db = db.CreateTable(&Namespace{})
ctl.initListerAndInformer()
list, err := ctl.lister.List(labels.Everything())
if err != nil {
glog.Error(err)
return
}
//list, err := ctl.lister.List(labels.Everything())
//if err != nil {
// glog.Error(err)
// return
//}
for _, item := range list {
obj := ctl.generateObject(*item)
db.Create(obj)
ctl.createRoleAndRuntime(*item)
}
//for _, item := range list {
// obj := ctl.generateObject(item)
// db.Create(obj)
// ctl.createRoleAndRuntime(item)
//}
ctl.informer.Run(stopChan)
}
@@ -369,16 +424,16 @@ func (ctl *NamespaceCtl) initListerAndInformer() {
AddFunc: func(obj interface{}) {
object := obj.(*v1.Namespace)
mysqlObject := ctl.generateObject(*object)
mysqlObject := ctl.generateObject(object)
db.Create(mysqlObject)
ctl.createRoleAndRuntime(*object)
ctl.createRoleAndRuntime(object)
ctl.createCephSecretAfterNewNs(*object)
},
UpdateFunc: func(old, new interface{}) {
object := new.(*v1.Namespace)
mysqlObject := ctl.generateObject(*object)
mysqlObject := ctl.generateObject(object)
db.Save(mysqlObject)
ctl.createRoleAndRuntime(*object)
ctl.createRoleAndRuntime(object)
},
DeleteFunc: func(obj interface{}) {
var item Namespace
@@ -386,7 +441,6 @@ func (ctl *NamespaceCtl) initListerAndInformer() {
db.Where("name=?", object.Name).Find(&item)
db.Delete(item)
ctl.deleteOpRuntime(*object)
},
})

View File

@@ -14,6 +14,8 @@ import (
"k8s.io/apimachinery/pkg/labels"
v12 "k8s.io/client-go/listers/rbac/v1"
"k8s.io/kubernetes/pkg/util/slice"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/controllers"
@@ -67,6 +69,7 @@ func GetUsers(names []string) ([]User, error) {
return nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -94,6 +97,7 @@ func GetUser(name string) (*User, error) {
return nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -228,7 +232,8 @@ func DeleteRoleBindings(username string) error {
length2 := len(roleBinding.Subjects)
if length2 == 0 {
k8s.RbacV1().RoleBindings(roleBinding.Namespace).Delete(roleBinding.Name, &meta_v1.DeleteOptions{})
deletePolicy := meta_v1.DeletePropagationForeground
k8s.RbacV1().RoleBindings(roleBinding.Namespace).Delete(roleBinding.Name, &meta_v1.DeleteOptions{PropagationPolicy: &deletePolicy})
} else if length2 < length1 {
k8s.RbacV1().RoleBindings(roleBinding.Namespace).Update(&roleBinding)
}
@@ -248,7 +253,8 @@ func DeleteRoleBindings(username string) error {
length2 := len(roleBinding.Subjects)
if length2 == 0 {
k8s.RbacV1().ClusterRoleBindings().Delete(roleBinding.Name, &meta_v1.DeleteOptions{})
deletePolicy := meta_v1.DeletePropagationForeground
k8s.RbacV1().ClusterRoleBindings().Delete(roleBinding.Name, &meta_v1.DeleteOptions{PropagationPolicy: &deletePolicy})
} else if length2 < length1 {
k8s.RbacV1().ClusterRoleBindings().Update(&roleBinding)
}
@@ -265,6 +271,21 @@ func GetRole(namespace string, name string) (*v1.Role, error) {
}
return role, nil
}
func GetWorkspaceUsers(workspace string, role string) []string {
users := make([]string, 0)
clusterRoleBindingLister := controllers.ResourceControllers.Controllers[controllers.ClusterRoleBindings].Lister().(v12.ClusterRoleBindingLister)
clusterRoleBinding, err := clusterRoleBindingLister.Get(fmt.Sprintf("system:%s:%s", workspace, role))
if err != nil {
return users
}
for _, s := range clusterRoleBinding.Subjects {
if s.Kind == v1.UserKind && !slice.ContainsString(users, s.Name, nil) {
users = append(users, s.Name)
}
}
return users
}
func GetClusterRoleBindings(name string) ([]v1.ClusterRoleBinding, error) {
k8s := client.NewK8sClient()
@@ -370,7 +391,6 @@ func GetRoles(namespace string, username string) ([]v1.Role, error) {
// Get cluster roles by username
func GetClusterRoles(username string) ([]v1.ClusterRole, error) {
//TODO fix NPE
clusterRoleBindingLister := controllers.ResourceControllers.Controllers[controllers.ClusterRoleBindings].Lister().(v12.ClusterRoleBindingLister)
clusterRoleLister := controllers.ResourceControllers.Controllers[controllers.ClusterRoles].Lister().(v12.ClusterRoleLister)
clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything())
@@ -382,7 +402,7 @@ func GetClusterRoles(username string) ([]v1.ClusterRole, error) {
roles := make([]v1.ClusterRole, 0)
for _, roleBinding := range clusterRoleBindings {
for _, subject := range roleBinding.Subjects {
for i, subject := range roleBinding.Subjects {
if subject.Kind == v1.UserKind && subject.Name == username {
if roleBinding.RoleRef.Kind == ClusterRoleKind {
role, err := clusterRoleLister.Get(roleBinding.RoleRef.Name)
@@ -398,7 +418,8 @@ func GetClusterRoles(username string) ([]v1.ClusterRole, error) {
roles = append(roles, *role)
break
} else if apierrors.IsNotFound(err) {
glog.Warning(err)
roleBinding.Subjects = append(roleBinding.Subjects[:i], roleBinding.Subjects[i+1:]...)
client.NewK8sClient().RbacV1().ClusterRoleBindings().Update(roleBinding)
break
} else {
return nil, err
@@ -411,76 +432,6 @@ func GetClusterRoles(username string) ([]v1.ClusterRole, error) {
return roles, nil
}
//func RuleValidate(rules []v1.PolicyRule, rule v1.PolicyRule) bool {
//
// for _, apiGroup := range rule.APIGroups {
// if len(rule.NonResourceURLs) == 0 {
// for _, resource := range rule.Resources {
//
// //if len(Rule.ResourceNames) == 0 {
//
// for _, verb := range rule.Verbs {
// if !verbValidate(rules, apiGroup, "", resource, "", verb) {
// return false
// }
// }
//
// //} else {
// // for _, resourceName := range Rule.ResourceNames {
// // for _, verb := range Rule.Verbs {
// // if !verbValidate(rules, apiGroup, "", resource, resourceName, verb) {
// // return false
// // }
// // }
// // }
// //}
// }
// } else {
// for _, nonResourceURL := range rule.NonResourceURLs {
// for _, verb := range rule.Verbs {
// if !verbValidate(rules, apiGroup, nonResourceURL, "", "", verb) {
// return false
// }
// }
// }
// }
// }
// return true
//}
//func verbValidate(rules []v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, resourceName string, verb string) bool {
// for _, rule := range rules {
//
// if nonResourceURL == "" {
// if slice.ContainsString(rule.APIGroups, apiGroup, nil) ||
// slice.ContainsString(rule.APIGroups, v1.APIGroupAll, nil) {
// if slice.ContainsString(rule.Verbs, verb, nil) ||
// slice.ContainsString(rule.Verbs, v1.VerbAll, nil) {
// if slice.ContainsString(rule.Resources, v1.ResourceAll, nil) {
// return true
// } else if slice.ContainsString(rule.Resources, resource, nil) {
// if len(rule.ResourceNames) > 0 {
// if slice.ContainsString(rule.ResourceNames, resourceName, nil) {
// return true
// }
// } else if resourceName == "" {
// return true
// }
// }
// }
// }
//
// } else if slice.ContainsString(rule.NonResourceURLs, nonResourceURL, nil) ||
// slice.ContainsString(rule.NonResourceURLs, v1.NonResourceAll, nil) {
// if slice.ContainsString(rule.Verbs, verb, nil) ||
// slice.ContainsString(rule.Verbs, v1.VerbAll, nil) {
// return true
// }
// }
// }
// return false
//}
func GetUserRules(username string) (map[string][]Rule, error) {
items := make(map[string][]Rule, 0)

View File

@@ -60,9 +60,22 @@ var (
{Name: "edit",
Rules: []v1.PolicyRule{
{
Verbs: []string{"update", "patch"},
Verbs: []string{"*"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces"},
}, {
Verbs: []string{"*"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/*"},
},
{
Verbs: []string{"*"},
APIGroups: []string{"jenkins.kubesphere.io"},
Resources: []string{"*"},
}, {
Verbs: []string{"*"},
APIGroups: []string{"devops.kubesphere.io"},
Resources: []string{"*"},
},
},
},
@@ -83,7 +96,34 @@ var (
{Name: "view",
Rules: []v1.PolicyRule{
{
Verbs: []string{"list"},
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/members"},
},
},
},
{Name: "create",
Rules: []v1.PolicyRule{
{
Verbs: []string{"create"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/members"},
},
},
},
{Name: "edit",
Rules: []v1.PolicyRule{
{
Verbs: []string{"patch", "update"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/members"},
},
},
},
{Name: "delete",
Rules: []v1.PolicyRule{
{
Verbs: []string{"delete"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/members"},
},
@@ -97,7 +137,7 @@ var (
{Name: "view",
Rules: []v1.PolicyRule{
{
Verbs: []string{"list"},
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/devops"},
},
@@ -124,7 +164,7 @@ var (
{Name: "delete",
Rules: []v1.PolicyRule{
{
Verbs: []string{"delete", "deletecollection"},
Verbs: []string{"delete"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/devops"},
},
@@ -138,7 +178,7 @@ var (
{Name: "view",
Rules: []v1.PolicyRule{
{
Verbs: []string{"list"},
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/namespaces"},
},
@@ -165,7 +205,7 @@ var (
{Name: "delete",
Rules: []v1.PolicyRule{
{
Verbs: []string{"delete", "deletecollection"},
Verbs: []string{"delete"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/namespaces"},
},
@@ -173,31 +213,57 @@ var (
},
},
},
{
Name: "registries",
Actions: []Action{
{Name: "view"},
{Name: "create"},
{Name: "edit"},
{Name: "delete"},
},
},
{
Name: "organizations",
Actions: []Action{
{Name: "view"},
{Name: "create"},
{Name: "edit"},
{Name: "delete"},
},
{Name: "view",
Rules: []v1.PolicyRule{
{
Verbs: []string{"get"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"workspaces/organizations"},
},
},
},
{Name: "create",
Rules: []v1.PolicyRule{
{
Verbs: []string{"create"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"workspaces/organizations"},
},
},
},
{Name: "edit",
Rules: []v1.PolicyRule{
{
Verbs: []string{"update", "patch"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"workspaces/organizations"},
},
},
},
{Name: "delete",
Rules: []v1.PolicyRule{
{
Verbs: []string{"delete"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"workspaces/organizations"},
},
},
}},
},
{
Name: "roles",
Actions: []Action{
{Name: "view"},
{Name: "create"},
{Name: "edit"},
{Name: "delete"},
{Name: "view",
Rules: []v1.PolicyRule{
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/roles"},
},
}},
},
},
}
@@ -242,56 +308,6 @@ var (
},
},
},
//{
// Name: "projects",
// Actions: []Action{
// {Name: "view",
// Rules: []v1.PolicyRule{
// {
// Verbs: []string{"get", "watch", "list"},
// APIGroups: []string{""},
// Resources: []string{"namespaces"},
// },
// },
// },
// {Name: "create",
// Rules: []v1.PolicyRule{
// {
// Verbs: []string{"create"},
// APIGroups: []string{""},
// Resources: []string{"namespaces"},
// },
// },
// },
// {Name: "edit",
// Rules: []v1.PolicyRule{
// {
// Verbs: []string{"update", "patch"},
// APIGroups: []string{""},
// Resources: []string{"namespaces"},
// },
// },
// },
// {Name: "delete",
// Rules: []v1.PolicyRule{
// {
// Verbs: []string{"delete", "deletecollection"},
// APIGroups: []string{""},
// Resources: []string{"namespaces"},
// },
// },
// },
// {Name: "members",
// Rules: []v1.PolicyRule{
// {
// Verbs: []string{"get", "watch", "list", "create", "delete", "patch", "update"},
// APIGroups: []string{"rbac.authorization.k8s.io"},
// Resources: []string{"rolebindings", "roles"},
// },
// },
// },
// },
//},
{
Name: "accounts",
Actions: []Action{

View File

@@ -20,11 +20,9 @@ type SimpleRule struct {
}
type User struct {
Username string `json:"username"`
//UID string `json:"uid"`
Groups []string `json:"groups"`
Password string `json:"password,omitempty"`
//Extra map[string]interface{} `json:"extra"`
Username string `json:"username"`
Groups []string `json:"groups"`
Password string `json:"password,omitempty"`
AvatarUrl string `json:"avatar_url"`
Description string `json:"description"`
Email string `json:"email"`

File diff suppressed because it is too large Load Diff

View File

@@ -1,539 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"regexp"
"strings"
"github.com/emicklei/go-restful"
"github.com/golang/glog"
"time"
"github.com/pkg/errors"
"k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sync"
"k8s.io/apimachinery/pkg/labels"
v12 "k8s.io/client-go/listers/core/v1"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/models"
"kubesphere.io/kubesphere/pkg/models/controllers"
"kubesphere.io/kubesphere/pkg/models/workspaces"
)
// getPodNameRegexInWorkload queries Prometheus for the pods belonging to the
// workload addressed by the request and returns an anchored alternation
// ("^(pod-a|pod-b)$") matching exactly those pod names.
func getPodNameRegexInWorkload(request *restful.Request) string {
	promql := MakeWorkloadRule(request)
	res := client.SendPrometheusRequest(request, promql)

	var dat CommonMetricsResult
	if jsonErr := json.Unmarshal([]byte(res), &dat); jsonErr != nil {
		// Best effort: log and continue with whatever was parsed.
		glog.Errorln("json parse failed", jsonErr)
	}

	podNames := make([]string, 0, len(dat.Data.Result))
	for _, item := range dat.Data.Result {
		podNames = append(podNames, item.KubePodMetric.Pod)
	}
	return "^(" + strings.Join(podNames, "|") + ")$"
}
// MonitorWorkloadSingleMetric retrieves one workload-level metric by first
// resolving the workload's pods into a pod-name regex, then issuing a
// pod-level Prometheus query restricted to those pods.
func MonitorWorkloadSingleMetric(request *restful.Request, metricsName string) *FormatedMetric {
	namespace := strings.Trim(request.PathParameter("ns_name"), " ")
	podFilter := getPodNameRegexInWorkload(request)
	promql := MakePodPromQL(request, []string{metricsName, namespace, "", "", podFilter})
	raw := client.SendPrometheusRequest(request, promql)
	return ReformatJson(raw, metricsName)
}
// MonitorPodSingleMetric retrieves one pod-level metric. The target is taken
// from the path (namespace, node, pod) and the optional "pods_filter" query
// parameter. Returns nil when no PromQL could be built for the parameters.
func MonitorPodSingleMetric(request *restful.Request, metricsName string) *FormatedMetric {
	params := []string{
		metricsName,
		strings.Trim(request.PathParameter("ns_name"), " "),
		strings.Trim(request.PathParameter("node_id"), " "),
		strings.Trim(request.PathParameter("pod_name"), " "),
		strings.Trim(request.QueryParameter("pods_filter"), " "),
	}
	promql := MakePodPromQL(request, params)
	if promql == "" {
		return nil
	}
	raw := client.SendPrometheusRequest(request, promql)
	return ReformatJson(raw, metricsName)
}
// MonitorNamespaceSingleMetric retrieves one namespace-level metric via its
// Prometheus recording rule and normalizes the response.
func MonitorNamespaceSingleMetric(request *restful.Request, metricsName string) *FormatedMetric {
	rule := MakeNamespacePromQL(request, metricsName)
	raw := client.SendPrometheusRequest(request, rule)
	return ReformatJson(raw, metricsName)
}
// ReformatJson unmarshals a raw Prometheus response into a FormatedMetric,
// fills in the metric name when the response did not carry one, and strips
// the redundant "__name__" label from every result entry.
// NOTE: walks every result item, so it may be costly on large responses.
func ReformatJson(metric string, metricsName string) *FormatedMetric {
	var formatMetric FormatedMetric
	if err := json.Unmarshal([]byte(metric), &formatMetric); err != nil {
		glog.Errorln("Unmarshal metric json failed", err)
	}
	if formatMetric.MetricName == "" {
		formatMetric.MetricName = metricsName
	}
	// metrics retrieved successfully
	if formatMetric.Status == MetricStatusSuccess {
		for _, res := range formatMetric.Data.Result {
			// Guard both the key lookup and the type assertion: a missing or
			// non-map "metric" entry must not panic the handler. (The previous
			// code asserted the type before checking ok, which could panic.)
			if m, ok := res[ResultItemMetric]; ok {
				if me, ok := m.(map[string]interface{}); ok {
					delete(me, "__name__")
				}
			}
		}
	}
	return &formatMetric
}
// collectNodeorClusterMetrics fetches one node- or cluster-level metric and
// delivers it on ch.
func collectNodeorClusterMetrics(request *restful.Request, metricsName string, ch chan<- *FormatedMetric) {
	ch <- MonitorNodeorClusterSingleMetric(request, metricsName)
}
// collectNamespaceMetrics fetches one namespace-level metric and delivers it
// on ch.
func collectNamespaceMetrics(request *restful.Request, metricsName string, ch chan<- *FormatedMetric) {
	ch <- MonitorNamespaceSingleMetric(request, metricsName)
}
// collectWorkloadMetrics strips the "workload_" prefix from the metric name,
// fetches the workload-level metric, and delivers it on ch.
func collectWorkloadMetrics(request *restful.Request, metricsName string, ch chan<- *FormatedMetric) {
	// strings.TrimPrefix removes the literal prefix. The previous TrimLeft
	// treated "workload_" as a character SET and could also eat leading
	// characters of the real metric name (e.g. "workload_worker_x" would lose
	// its "wor…" prefix as well).
	metricsName = strings.TrimPrefix(metricsName, "workload_")
	ch <- MonitorWorkloadSingleMetric(request, metricsName)
}
// collectWorkspaceMetrics fetches one workspace-level metric, scoped to the
// given namespaces, and delivers it on ch.
func collectWorkspaceMetrics(request *restful.Request, metricsName string, namespaceList []string, ch chan<- *FormatedMetric) {
	ch <- monitorWorkspaceSingleMertic(request, metricsName, namespaceList)
}
// collectPodMetrics fetches one pod-level metric and delivers it on ch.
func collectPodMetrics(request *restful.Request, metricsName string, ch chan<- *FormatedMetric) {
	ch <- MonitorPodSingleMetric(request, metricsName)
}
// monitorWorkspaceSingleMertic queries one workspace metric, restricting
// the query to the given namespaces via an anchored RE2 alternation.
// (Name typo "Mertic" kept: callers reference it.)
func monitorWorkspaceSingleMertic(request *restful.Request, metricsName string, namespaceList []string) *FormatedMetric {
	nsFilter := "^(" + strings.Join(namespaceList, "|") + ")$"
	promql := MakeWorkspacePromQL(metricsName, nsFilter)
	rawResult := client.SendPrometheusRequest(request, promql)
	return ReformatJson(rawResult, metricsName)
}
// filterNamespace returns the subset of namespaceList whose names match the
// "namespaces_filter" query parameter (a regular expression, default ".*").
func filterNamespace(request *restful.Request, namespaceList []string) []string {
	nsFilter := strings.Trim(request.QueryParameter("namespaces_filter"), " ")
	if nsFilter == "" {
		nsFilter = ".*"
	}
	// Compile the pattern once instead of re-compiling it (via
	// regexp.MatchString) for every namespace; also surface a bad pattern
	// instead of silently dropping every name.
	re, err := regexp.Compile(nsFilter)
	if err != nil {
		glog.Errorln("invalid namespaces_filter regex", err)
		return nil
	}
	var newNSlist []string
	for _, ns := range namespaceList {
		if re.MatchString(ns) {
			newNSlist = append(newNSlist, ns)
		}
	}
	return newNSlist
}
// MonitorAllMetrics fans out one goroutine per metric whose name matches the
// "metrics_filter" query parameter (default ".*") at the level implied by
// the route path, then gathers the results from the channel in a second,
// identically filtered pass. The two passes MUST apply the same filter so
// the number of channel receives equals the number of goroutines launched.
func MonitorAllMetrics(request *restful.Request) FormatedLevelMetric {
	metricsName := strings.Trim(request.QueryParameter("metrics_filter"), " ")
	if metricsName == "" {
		metricsName = ".*"
	}
	// Derive the source type from the last path segment; len(path)-1 drops
	// the final character (assumes a plural segment such as ".../nodes").
	path := request.SelectedRoutePath()
	sourceType := path[strings.LastIndex(path, "/")+1 : len(path)-1]
	if strings.Contains(path, MetricLevelWorkload) {
		sourceType = MetricLevelWorkload
	} else if strings.Contains(path, MetricLevelWorkspace) {
		sourceType = MetricLevelWorkspace
	}
	var ch = make(chan *FormatedMetric, 10)
	// Pass 1: launch one collector goroutine per matching metric.
	for _, metricName := range MetricsNames {
		// NOTE: bol is checked before err; on a regex error MatchString
		// returns false, so the metric is skipped either way.
		bol, err := regexp.MatchString(metricsName, metricName)
		if !bol {
			continue
		}
		if err != nil {
			glog.Errorln("regex match failed", err)
			continue
		}
		if strings.HasPrefix(metricName, sourceType) {
			if sourceType == MetricLevelCluster || sourceType == MetricLevelNode {
				go collectNodeorClusterMetrics(request, metricName, ch)
			} else if sourceType == MetricLevelNamespace {
				go collectNamespaceMetrics(request, metricName, ch)
			} else if sourceType == MetricLevelPod {
				go collectPodMetrics(request, metricName, ch)
			} else if sourceType == MetricLevelWorkload {
				go collectWorkloadMetrics(request, metricName, ch)
			} else if sourceType == MetricLevelWorkspace {
				// workspace queries are scoped to the workspace's namespaces,
				// optionally narrowed by the namespaces_filter parameter
				name := request.PathParameter("workspace_name")
				namespaces, err := workspaces.WorkspaceNamespaces(name)
				if err != nil {
					glog.Errorln(err)
				}
				namespaces = filterNamespace(request, namespaces)
				go collectWorkspaceMetrics(request, metricName, namespaces, ch)
			}
		}
	}
	// Pass 2: repeat the identical filter and receive one result per
	// goroutine launched above.
	var metricsArray []FormatedMetric
	var tempJson *FormatedMetric
	for _, k := range MetricsNames {
		bol, err := regexp.MatchString(metricsName, k)
		if !bol {
			continue
		}
		if err != nil {
			glog.Errorln("regex match failed")
			continue
		}
		if strings.HasPrefix(k, sourceType) {
			tempJson = <-ch
			if tempJson != nil {
				metricsArray = append(metricsArray, *tempJson)
			}
		}
	}
	return FormatedLevelMetric{
		MetricsLevel: sourceType,
		Results:      metricsArray,
	}
}
// MonitorWorkspaceUserInfo gathers platform-wide counts (organizations,
// devops projects, projects and accounts) concurrently and returns them as
// a workspace-level metric set, in that fixed order.
func MonitorWorkspaceUserInfo(req *restful.Request) FormatedLevelMetric {
	now := time.Now().Unix()
	var orgMetric, devopsMetric, projMetric, accountMetric FormatedMetric
	var wg sync.WaitGroup
	wg.Add(4)
	go func() {
		defer wg.Done()
		count, err := workspaces.GetAllOrgNums()
		orgMetric = getSpecificMetricItem(now, MetricNameWorkspaceAllOrganizationCount, WorkspaceResourceKindOrganization, count, err)
	}()
	go func() {
		defer wg.Done()
		count, err := workspaces.GetAllDevOpsProjectsNums()
		devopsMetric = getSpecificMetricItem(now, MetricNameWorkspaceAllDevopsCount, WorkspaceResourceKindDevops, count, err)
	}()
	go func() {
		defer wg.Done()
		count, err := workspaces.GetAllProjectNums()
		projMetric = getSpecificMetricItem(now, MetricNameWorkspaceAllProjectCount, WorkspaceResourceKindNamespace, count, err)
	}()
	go func() {
		defer wg.Done()
		count, err := workspaces.GetAllAccountNums()
		accountMetric = getSpecificMetricItem(now, MetricNameWorkspaceAllAccountCount, WorkspaceResourceKindAccount, count, err)
	}()
	wg.Wait()
	return FormatedLevelMetric{
		MetricsLevel: MetricLevelWorkspace,
		Results:      []FormatedMetric{orgMetric, devopsMetric, projMetric, accountMetric},
	}
}
//func getWorkspaceMetricItem(timestamp int64, namespaceNums int64, resourceName string, err error) FormatedMetric {
// var fMetric FormatedMetric
// fMetric.Data.ResultType = ResultTypeVector
// fMetric.MetricName = MetricNameWorkspaceInfoCount
// fMetric.Status = MetricStatusSuccess
// if err != nil {
// fMetric.Status = MetricStatusError
// }
// resultItem := make(map[string]interface{})
// tmp := make(map[string]string)
// tmp[ResultItemMetricResource] = resourceName
// resultItem[ResultItemMetric] = tmp
// resultItem[ResultItemValue] = []interface{}{timestamp, strconv.FormatInt(namespaceNums, 10)}
// return fMetric
//}
// MonitorWorkspaceResourceLevelMetrics collects per-workspace resource
// counts: namespaces, devops projects, members, roles, workload quota usage
// and pods.
func MonitorWorkspaceResourceLevelMetrics(request *restful.Request) FormatedLevelMetric {
	wsName := request.PathParameter("workspace_name")
	wsNamespaces, errNs := workspaces.WorkspaceNamespaces(wsName)
	devOpsProjects, errDevOps := workspaces.GetDevOpsProjects(wsName)
	members, errMemb := workspaces.GetOrgMembers(wsName)
	roles, errRole := workspaces.GetOrgRoles(wsName)
	timestamp := int64(time.Now().Unix())

	// drop namespaces that no longer exist and surface them as an error
	namespaces, noneExistentNs := getExistingNamespace(wsNamespaces)
	if len(noneExistentNs) != 0 {
		errStr := "the namespaces " + strings.Join(noneExistentNs, "|") + " do not exist"
		if errNs == nil {
			errNs = errors.New(errStr)
		} else {
			errNs = errors.New(errNs.Error() + "\t" + errStr)
		}
	}

	results := []FormatedMetric{
		// namespaces (project) count
		getSpecificMetricItem(timestamp, MetricNameWorkspaceNamespaceCount, WorkspaceResourceKindNamespace, len(namespaces), errNs),
		// devops project count
		getSpecificMetricItem(timestamp, MetricNameWorkspaceDevopsCount, WorkspaceResourceKindDevops, len(devOpsProjects), errDevOps),
		// member count
		getSpecificMetricItem(timestamp, MetricNameWorkspaceMemberCount, WorkspaceResourceKindMember, len(members), errMemb),
		// role count
		getSpecificMetricItem(timestamp, MetricNameWorkspaceRoleCount, WorkspaceResourceKindRole, len(roles), errRole),
		// workload quota usage across the workspace's namespaces
		getWorkspaceWorkloadCountMetrics(namespaces),
		// pod count across the workspace's namespaces
		*getWorkspacePodsCountMetrics(request, namespaces),
	}
	return FormatedLevelMetric{
		MetricsLevel: MetricLevelWorkspace,
		Results:      results,
	}
}
// getWorkspacePodsCountMetrics counts pods across the given namespaces
// using the namespace_pod_count recording-rule template, with the namespace
// placeholder replaced by an anchored alternation.
func getWorkspacePodsCountMetrics(request *restful.Request, namespaces []string) *FormatedMetric {
	metricName := MetricNameNamespacePodCount
	nsFilter := "^(" + strings.Join(namespaces, "|") + ")$"
	promql := strings.Replace(RulePromQLTmplMap[metricName], "$1", nsFilter, -1)
	rawResult := client.SendPrometheusRequest(request, promql)
	return ReformatJson(rawResult, metricName)
}
func getWorkspaceWorkloadCountMetrics(namespaces []string) FormatedMetric {
var wlQuotaMetrics models.ResourceQuota
wlQuotaMetrics.NameSpace = strings.Join(namespaces, "|")
wlQuotaMetrics.Data.Used = make(v1.ResourceList, 1)
wlQuotaMetrics.Data.Hard = make(v1.ResourceList, 1)
for _, ns := range namespaces {
quotaMetric, err := models.GetNamespaceQuota(ns)
if err != nil {
glog.Errorln(err)
continue
}
// sum all resources used along namespaces
quotaUsed := quotaMetric.Data.Used
for resourceName, quantity := range quotaUsed {
if _, ok := wlQuotaMetrics.Data.Used[resourceName]; ok {
tmpQuantity := wlQuotaMetrics.Data.Used[v1.ResourceName(resourceName)]
tmpQuantity.Add(quantity)
wlQuotaMetrics.Data.Used[v1.ResourceName(resourceName)] = tmpQuantity
} else {
wlQuotaMetrics.Data.Used[v1.ResourceName(resourceName)] = quantity.DeepCopy()
}
}
// sum all resources hard along namespaces
quotaHard := quotaMetric.Data.Hard
for resourceName, quantity := range quotaHard {
if _, ok := wlQuotaMetrics.Data.Hard[resourceName]; ok {
tmpQuantity := wlQuotaMetrics.Data.Hard[v1.ResourceName(resourceName)]
tmpQuantity.Add(quantity)
wlQuotaMetrics.Data.Hard[v1.ResourceName(resourceName)] = tmpQuantity
} else {
wlQuotaMetrics.Data.Hard[v1.ResourceName(resourceName)] = quantity.DeepCopy()
}
}
}
wlMetrics := convertQuota2MetricStruct(&wlQuotaMetrics)
return wlMetrics
}
// getSpecificMetricItem builds a single-sample vector metric carrying a
// resource count. On error the status is set to "error" and the message is
// attached to the result item under "errorinfo".
func getSpecificMetricItem(timestamp int64, metricName string, kind string, count int, err error) FormatedMetric {
	var fm FormatedMetric
	fm.MetricName = metricName
	fm.Data.ResultType = ResultTypeVector

	labels := map[string]string{ResultItemMetricResource: kind}
	item := map[string]interface{}{
		ResultItemMetric: labels,
		ResultItemValue:  []interface{}{timestamp, count},
	}
	if err == nil {
		fm.Status = MetricStatusSuccess
	} else {
		fm.Status = MetricStatusError
		item["errorinfo"] = err.Error()
	}
	fm.Data.Result = []map[string]interface{}{item}
	return fm
}
// MonitorNodeorClusterSingleMetric resolves one node- or cluster-level
// metric. Node online/offline/total counts are computed from the k8s API;
// every other metric is delegated to Prometheus.
func MonitorNodeorClusterSingleMetric(request *restful.Request, metricsName string) *FormatedMetric {
	var fMetric FormatedMetric
	timestamp := int64(time.Now().Unix())
	// support cluster node statistic, include healthy nodes and unhealthy
	// nodes; use the declared metric-name constants instead of repeating
	// the string literals, and a switch instead of an if-chain
	switch metricsName {
	case MetricNameClusterHealthyNodeCount:
		onlineNodes, _ := getNodeHealthyConditionMetric()
		fMetric = getSpecificMetricItem(timestamp, MetricNameClusterHealthyNodeCount, "node_count", len(onlineNodes), nil)
	case MetricNameClusterUnhealthyNodeCount:
		_, offlineNodes := getNodeHealthyConditionMetric()
		fMetric = getSpecificMetricItem(timestamp, MetricNameClusterUnhealthyNodeCount, "node_count", len(offlineNodes), nil)
	case MetricNameClusterNodeCount:
		onlineNodes, offlineNodes := getNodeHealthyConditionMetric()
		fMetric = getSpecificMetricItem(timestamp, MetricNameClusterNodeCount, "node_count", len(onlineNodes)+len(offlineNodes), nil)
	default:
		recordingRule := MakeNodeorClusterRule(request, metricsName)
		res := client.SendPrometheusRequest(request, recordingRule)
		fMetric = *ReformatJson(res, metricsName)
	}
	return &fMetric
}
// getNodeHealthyConditionMetric lists cluster nodes (excluding role=log
// nodes) and splits them into reachable and unreachable sets. A node is
// treated as unreachable only when its Ready condition reports "Unknown".
// Returns (nil, nil) when the node list cannot be fetched.
func getNodeHealthyConditionMetric() ([]string, []string) {
	nodeList, err := client.NewK8sClient().CoreV1().Nodes().List(metaV1.ListOptions{})
	if err != nil {
		glog.Errorln(err)
		return nil, nil
	}
	var onlineNodes, offlineNodes []string
	for _, node := range nodeList.Items {
		// dedicated log nodes are excluded from both sets
		if node.Labels["role"] == "log" {
			continue
		}
		reachable := true
		for _, cond := range node.Status.Conditions {
			if cond.Type == "Ready" && cond.Status == "Unknown" {
				reachable = false
				break
			}
		}
		nodeName := node.Labels["kubernetes.io/hostname"]
		if reachable {
			onlineNodes = append(onlineNodes, nodeName)
		} else {
			offlineNodes = append(offlineNodes, nodeName)
		}
	}
	return onlineNodes, offlineNodes
}
// getExistingNamespace partitions the given names into those that currently
// exist in the cluster and those that do not. If the cluster namespace list
// cannot be fetched, every name is assumed to exist.
func getExistingNamespace(namespaces []string) ([]string, []string) {
	namespaceMap, err := getAllNamespace()
	if err != nil {
		return namespaces, nil
	}
	var existing, missing []string
	for _, ns := range namespaces {
		if _, found := namespaceMap[ns]; found {
			existing = append(existing, ns)
		} else {
			missing = append(missing, ns)
		}
	}
	return existing, missing
}
// getAllNamespace returns the names of all namespaces known to the shared
// informer cache, as a set (the int values are unused placeholders).
func getAllNamespace() (map[string]int, error) {
	lister := controllers.ResourceControllers.Controllers[controllers.Namespaces].Lister().(v12.NamespaceLister)
	nsList, err := lister.List(labels.Everything())
	if err != nil {
		glog.Errorln(err)
		return nil, err
	}
	namespaceMap := make(map[string]int, len(nsList))
	for _, item := range nsList {
		namespaceMap[item.Name] = 0
	}
	return namespaceMap, nil
}
// MonitorWorkloadCount returns the workload quota usage of one namespace.
// When the namespace does not exist or the quota lookup fails, the metric
// is converted into an error result carrying the reason.
func MonitorWorkloadCount(request *restful.Request) FormatedMetric {
	namespace := strings.Trim(request.PathParameter("ns_name"), " ")
	quotaMetric, err := models.GetNamespaceQuota(namespace)
	fMetric := convertQuota2MetricStruct(quotaMetric)

	// whether the namespace in request parameters exists? (if the cluster
	// namespace list cannot be fetched, assume it does)
	nsExists := true
	if namespaceMap, e := getAllNamespace(); e == nil {
		_, nsExists = namespaceMap[namespace]
	}
	if !nsExists || err != nil {
		fMetric.Status = MetricStatusError
		fMetric.Data.ResultType = ""
		errInfo := make(map[string]interface{})
		if err != nil {
			errInfo["errormsg"] = err.Error()
		} else {
			errInfo["errormsg"] = "namespace " + namespace + " does not exist"
		}
		fMetric.Data.Result = []map[string]interface{}{errInfo}
	}
	return fMetric
}
// convertQuota2MetricStruct converts a namespace ResourceQuota into the
// common metric format: one vector sample per used resource, whose value
// triple is (timestamp, hard limit, used amount).
func convertQuota2MetricStruct(quotaMetric *models.ResourceQuota) FormatedMetric {
	var fMetric FormatedMetric
	fMetric.MetricName = MetricNameWorkloadCount
	fMetric.Status = MetricStatusSuccess
	fMetric.Data.ResultType = ResultTypeVector
	timestamp := int64(time.Now().Unix())

	// index hard limits by resource name for the value triples below
	hardMap := make(map[string]string, len(quotaMetric.Data.Hard))
	for resourceName, v := range quotaMetric.Data.Hard {
		hardMap[resourceName.String()] = v.String()
	}

	var resultItems []map[string]interface{}
	for resourceName, v := range quotaMetric.Data.Used {
		name := resourceName.String()
		resultItems = append(resultItems, map[string]interface{}{
			ResultItemMetric: map[string]string{ResultItemMetricResource: name},
			ResultItemValue:  []interface{}{timestamp, hardMap[name], v.String()},
		})
	}
	fMetric.Data.Result = resultItems
	return fMetric
}

View File

@@ -1,236 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
// Prometheus result-format and status constants, mirroring the fields of
// the Prometheus HTTP API response.
const (
	ResultTypeVector    = "vector"
	ResultTypeMatrix    = "matrix"
	MetricStatusError   = "error"
	MetricStatusSuccess = "success"
	// keys of a single item inside data.result
	ResultItemMetric         = "metric"
	ResultItemMetricResource = "resource"
	ResultItemValue          = "value"
)

// Names of the metrics synthesized by this package itself (counts computed
// from the k8s API or the workspace DB rather than Prometheus rules).
const (
	MetricNameWorkloadCount                 = "workload_count"
	MetricNameNamespacePodCount             = "namespace_pod_count"
	MetricNameWorkspaceAllOrganizationCount = "workspace_all_organization_count"
	MetricNameWorkspaceAllAccountCount      = "workspace_all_account_count"
	MetricNameWorkspaceAllProjectCount      = "workspace_all_project_count"
	MetricNameWorkspaceAllDevopsCount       = "workspace_all_devops_project_count"
	MetricNameWorkspaceNamespaceCount       = "workspace_namespace_count"
	MetricNameWorkspaceDevopsCount          = "workspace_devops_project_count"
	MetricNameWorkspaceMemberCount          = "workspace_member_count"
	MetricNameWorkspaceRoleCount            = "workspace_role_count"
	MetricNameClusterHealthyNodeCount       = "cluster_node_online"
	MetricNameClusterUnhealthyNodeCount     = "cluster_node_offline"
	MetricNameClusterNodeCount              = "cluster_node_total"
)

// Workspace resource kinds reported in the "resource" metric label.
const (
	WorkspaceResourceKindOrganization = "organization"
	WorkspaceResourceKindAccount      = "account"
	WorkspaceResourceKindNamespace    = "namespace"
	WorkspaceResourceKindDevops       = "devops"
	WorkspaceResourceKindMember       = "member"
	WorkspaceResourceKindRole         = "role"
)

// Monitoring levels; also used as metric-name prefixes and route segments.
const (
	MetricLevelCluster   = "cluster"
	MetricLevelNode      = "node"
	MetricLevelWorkspace = "workspace"
	MetricLevelNamespace = "namespace"
	MetricLevelPod       = "pod"
	MetricLevelContainer = "container"
	MetricLevelWorkload  = "workload"
)
// MetricMap maps a metric name to its PromQL template.
type MetricMap map[string]string

// MetricsNames lists every metric this package can serve; MonitorAllMetrics
// filters this list with the caller-supplied "metrics_filter" regex and the
// level prefix (cluster_/node_/namespace_/pod_/workload_/workspace_).
var MetricsNames = []string{
	"cluster_cpu_utilisation",
	"cluster_cpu_usage",
	"cluster_cpu_total",
	"cluster_memory_utilisation",
	"cluster_pod_count",
	"cluster_memory_bytes_available",
	"cluster_memory_bytes_total",
	"cluster_memory_bytes_usage",
	"cluster_net_utilisation",
	"cluster_net_bytes_transmitted",
	"cluster_net_bytes_received",
	"cluster_disk_read_iops",
	"cluster_disk_write_iops",
	"cluster_disk_read_throughput",
	"cluster_disk_write_throughput",
	"cluster_disk_size_usage",
	"cluster_disk_size_utilisation",
	"cluster_disk_size_capacity",
	"cluster_disk_size_available",
	"cluster_node_online",
	"cluster_node_offline",
	"cluster_node_total",
	"node_cpu_utilisation",
	"node_cpu_total",
	"node_cpu_usage",
	"node_memory_utilisation",
	"node_memory_bytes_usage",
	"node_memory_bytes_available",
	"node_memory_bytes_total",
	"node_net_utilisation",
	"node_net_bytes_transmitted",
	"node_net_bytes_received",
	"node_disk_read_iops",
	"node_disk_write_iops",
	"node_disk_read_throughput",
	"node_disk_write_throughput",
	"node_disk_size_capacity",
	"node_disk_size_available",
	"node_disk_size_usage",
	"node_disk_size_utilisation",
	"node_pod_count",
	"node_pod_quota",
	"namespace_cpu_usage",
	"namespace_memory_usage",
	"namespace_memory_usage_wo_cache",
	"namespace_net_bytes_transmitted",
	"namespace_net_bytes_received",
	"namespace_pod_count",
	"pod_cpu_usage",
	"pod_memory_usage",
	"pod_memory_usage_wo_cache",
	"pod_net_bytes_transmitted",
	"pod_net_bytes_received",
	"workload_pod_cpu_usage",
	"workload_pod_memory_usage",
	"workload_pod_memory_usage_wo_cache",
	"workload_pod_net_bytes_transmitted",
	"workload_pod_net_bytes_received",
	//"container_cpu_usage",
	//"container_memory_usage_wo_cache",
	//"container_memory_usage",
	"workspace_cpu_usage",
	"workspace_memory_usage",
	"workspace_memory_usage_wo_cache",
	"workspace_net_bytes_transmitted",
	"workspace_net_bytes_received",
	"workspace_pod_count",
}
// RulePromQLTmplMap maps each metric name to its PromQL template. "$1"/"$2"
// and "$3" are positional placeholders substituted by the Make*PromQL
// helpers (namespace filter, pod/workload name, node/container selector).
var RulePromQLTmplMap = MetricMap{
	//cluster
	"cluster_cpu_utilisation":       ":node_cpu_utilisation:avg1m",
	"cluster_cpu_usage":             `sum (irate(container_cpu_usage_seconds_total{job="kubelet", image!=""}[5m]))`,
	"cluster_cpu_total":             "sum(node:node_num_cpu:sum)",
	"cluster_memory_utilisation":    ":node_memory_utilisation:",
	"cluster_pod_count":             `count(kube_pod_info unless on(pod) kube_pod_completion_time unless on(node) kube_node_labels{label_role="log"})`,
	"cluster_memory_bytes_available": "sum(node:node_memory_bytes_available:sum)",
	"cluster_memory_bytes_total":    "sum(node:node_memory_bytes_total:sum)",
	"cluster_memory_bytes_usage":    "sum(node:node_memory_bytes_total:sum) - sum(node:node_memory_bytes_available:sum)",
	"cluster_net_utilisation":       ":node_net_utilisation:sum_irate",
	"cluster_net_bytes_transmitted": "sum(node:node_net_bytes_transmitted:sum_irate)",
	"cluster_net_bytes_received":    "sum(node:node_net_bytes_received:sum_irate)",
	"cluster_disk_read_iops":        "sum(node:data_volume_iops_reads:sum)",
	"cluster_disk_write_iops":       "sum(node:data_volume_iops_writes:sum)",
	"cluster_disk_read_throughput":  "sum(node:data_volume_throughput_bytes_read:sum)",
	"cluster_disk_write_throughput": "sum(node:data_volume_throughput_bytes_written:sum)",
	"cluster_disk_size_usage":       `sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:)) - sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
	"cluster_disk_size_utilisation": `(sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:)) - sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))) / sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
	"cluster_disk_size_capacity":    `sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
	"cluster_disk_size_available":   `sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
	//node
	"node_cpu_utilisation":        "node:node_cpu_utilisation:avg1m",
	"node_cpu_total":              "node:node_num_cpu:sum",
	"node_memory_utilisation":     "node:node_memory_utilisation:",
	"node_memory_bytes_available": "node:node_memory_bytes_available:sum",
	"node_memory_bytes_total":     "node:node_memory_bytes_total:sum",
	// Node network utilisation (bytes received + bytes transmitted per second)
	"node_net_utilisation": "node:node_net_utilisation:sum_irate",
	// Node network bytes transmitted per second
	"node_net_bytes_transmitted": "node:node_net_bytes_transmitted:sum_irate",
	// Node network bytes received per second
	"node_net_bytes_received": "node:node_net_bytes_received:sum_irate",
	// node:data_volume_iops_reads:sum{node=~"i-5xcldxos|i-6soe9zl1"}
	"node_disk_read_iops": "node:data_volume_iops_reads:sum",
	// node:data_volume_iops_writes:sum{node=~"i-5xcldxos|i-6soe9zl1"}
	"node_disk_write_iops": "node:data_volume_iops_writes:sum",
	// node:data_volume_throughput_bytes_read:sum{node=~"i-5xcldxos|i-6soe9zl1"}
	"node_disk_read_throughput": "node:data_volume_throughput_bytes_read:sum",
	// node:data_volume_throughput_bytes_written:sum{node=~"i-5xcldxos|i-6soe9zl1"}
	"node_disk_write_throughput": "node:data_volume_throughput_bytes_written:sum",
	"node_disk_size_capacity":    `sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
	"node_disk_size_available":   `sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
	"node_disk_size_usage":       `sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1) -sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
	"node_disk_size_utilisation": `sum by (node) (((node_filesystem_size{mountpoint="/", job="node-exporter"} - node_filesystem_avail{mountpoint="/", job="node-exporter"}) / node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
	"node_pod_count":             `count(kube_pod_info$1 unless on(pod) kube_pod_completion_time) by (node)`,
	// without log node: unless on(node) kube_node_labels{label_role="log"}
	"node_pod_quota":          `sum(kube_node_status_capacity_pods$1) by (node)`,
	"node_cpu_usage":          `sum by (node) (label_join(irate(container_cpu_usage_seconds_total{job="kubelet", image!=""}[5m]), "pod", " ", "pod_name") * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
	"node_memory_bytes_usage": "node:node_memory_bytes_total:sum$1 - node:node_memory_bytes_available:sum$1",
	//namespace
	"namespace_cpu_usage":             `namespace:container_cpu_usage_seconds_total:sum_rate{namespace=~"$1"}`,
	"namespace_memory_usage":          `namespace:container_memory_usage_bytes:sum{namespace=~"$1"}`,
	"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace=~"$1"}`,
	"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m]))`,
	"namespace_net_bytes_received":    `sum by (namespace) (irate(container_network_receive_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m]))`,
	"namespace_pod_count":             `count(kube_pod_info{namespace=~"$1"} unless on(pod) kube_pod_completion_time) by (namespace)`,
	// pod
	"pod_cpu_usage":                  `sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name="$2", image!=""}[5m])) by (namespace, pod_name)`,
	"pod_memory_usage":               `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name="$2", image!=""}) by (namespace, pod_name)`,
	"pod_memory_usage_wo_cache":      `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name="$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name="$2",image!=""}) by (namespace, pod_name)`,
	"pod_net_bytes_transmitted":      `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name="$2", interface="eth0", job="kubelet"}[5m]))`,
	"pod_net_bytes_received":         `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name="$2", interface="eth0", job="kubelet"}[5m]))`,
	"pod_cpu_usage_all":              `sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}[5m])) by (namespace, pod_name)`,
	"pod_memory_usage_all":           `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
	"pod_memory_usage_wo_cache_all":  `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name=~"$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
	"pod_net_bytes_transmitted_all":  `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", interface="eth0", job="kubelet"}[5m]))`,
	"pod_net_bytes_received_all":     `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", interface="eth0", job="kubelet"}[5m]))`,
	"pod_cpu_usage_node":             `sum by (node, pod) (label_join(irate(container_cpu_usage_seconds_total{job="kubelet",pod_name=~"$2", image!=""}[5m]), "pod", " ", "pod_name") * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
	"pod_memory_usage_node":          `sum by (node, pod) (label_join(container_memory_usage_bytes{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name") * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
	"pod_memory_usage_wo_cache_node": `sum by (node, pod) ((label_join(container_memory_usage_bytes{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name") - label_join(container_memory_cache{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name")) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
	// container
	"container_cpu_usage":                 `sum(irate(container_cpu_usage_seconds_total{namespace="$1", pod_name="$2", container_name="$3"}[5m])) by (namespace, pod_name, container_name)`,
	"container_cpu_usage_all":             `sum(irate(container_cpu_usage_seconds_total{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}[5m])) by (namespace, pod_name, container_name)`,
	"container_memory_usage_wo_cache":     `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name="$3"} - ignoring(id, image, endpoint, instance, job, name, service) container_memory_cache{namespace="$1", pod_name="$2", container_name="$3"}`,
	"container_memory_usage_wo_cache_all": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"} - ignoring(id, image, endpoint, instance, job, name, service) container_memory_cache{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}`,
	"container_memory_usage":              `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name="$3"}`,
	"container_memory_usage_all":          `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}`,
	// enterprise
	"workspace_cpu_usage":             `sum(namespace:container_cpu_usage_seconds_total:sum_rate{namespace =~"$1"})`,
	"workspace_memory_usage":          `sum(namespace:container_memory_usage_bytes:sum{namespace =~"$1"})`,
	"workspace_memory_usage_wo_cache": `sum(namespace:container_memory_usage_bytes_wo_cache:sum{namespace =~"$1"})`,
	"workspace_net_bytes_transmitted": `sum(sum by (namespace) (irate(container_network_transmit_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m])))`,
	"workspace_net_bytes_received":    `sum(sum by (namespace) (irate(container_network_receive_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m])))`,
	"workspace_pod_count":             `sum(count(kube_pod_info{namespace=~"$1"} unless on(pod) kube_pod_completion_time) by (namespace))`,
}

View File

@@ -15,142 +15,119 @@ package metrics
import (
"strings"
"github.com/emicklei/go-restful"
)
func MakeWorkloadRule(request *restful.Request) string {
// kube_pod_info{created_by_kind="DaemonSet",created_by_name="fluent-bit",endpoint="https-main",
// host_ip="192.168.0.14",instance="10.244.114.187:8443",job="kube-state-metrics",
// namespace="kube-system",node="i-k89a62il",pod="fluent-bit-l5vxr",
// pod_ip="10.244.114.175",service="kube-state-metrics"}
rule := `kube_pod_info{created_by_kind="$1",created_by_name=$2,namespace="$3"}`
kind := strings.Trim(request.PathParameter("workload_kind"), " ")
name := strings.Trim(request.QueryParameter("workload_name"), " ")
namespace := strings.Trim(request.PathParameter("ns_name"), " ")
func MakeWorkloadRule(wkKind, wkName, namespace string) string {
var rule = PodInfoRule
if namespace == "" {
namespace = ".*"
}
// alertnatives values: Deployment StatefulSet ReplicaSet DaemonSet
wkKind = strings.ToLower(wkKind)
// kind alertnatives values: Deployment StatefulSet ReplicaSet DaemonSet
kind = strings.ToLower(kind)
switch kind {
switch wkKind {
case "deployment":
kind = "ReplicaSet"
if name != "" {
name = "~\"" + name + ".*\""
wkKind = ReplicaSet
if wkName != "" {
wkName = "~\"^" + wkName + `-(\\w)+$"`
} else {
name = "~\".*\""
wkName = "~\".*\""
}
rule = strings.Replace(rule, "$1", kind, -1)
rule = strings.Replace(rule, "$2", name, -1)
rule = strings.Replace(rule, "$1", wkKind, -1)
rule = strings.Replace(rule, "$2", wkName, -1)
rule = strings.Replace(rule, "$3", namespace, -1)
return rule
case "replicaset":
kind = "ReplicaSet"
wkKind = ReplicaSet
case "statefulset":
kind = "StatefulSet"
wkKind = StatefulSet
case "daemonset":
kind = "DaemonSet"
wkKind = DaemonSet
}
if name == "" {
name = "~\".*\""
if wkName == "" {
wkName = "~\".*\""
} else {
name = "\"" + name + "\""
wkName = "\"" + wkName + "\""
}
rule = strings.Replace(rule, "$1", kind, -1)
rule = strings.Replace(rule, "$2", name, -1)
rule = strings.Replace(rule, "$1", wkKind, -1)
rule = strings.Replace(rule, "$2", wkName, -1)
rule = strings.Replace(rule, "$3", namespace, -1)
return rule
}
func MakeWorkspacePromQL(metricsName string, namespaceRe2 string) string {
func MakeWorkspacePromQL(metricsName string, nsFilter string) string {
promql := RulePromQLTmplMap[metricsName]
promql = strings.Replace(promql, "$1", namespaceRe2, -1)
promql = strings.Replace(promql, "$1", nsFilter, -1)
return promql
}
func MakeContainerPromQL(request *restful.Request) string {
nsName := strings.Trim(request.PathParameter("ns_name"), " ")
poName := strings.Trim(request.PathParameter("pod_name"), " ")
containerName := strings.Trim(request.PathParameter("container_name"), " ")
// metricType container_cpu_utilisation container_memory_utilisation container_memory_utilisation_wo_cache
metricType := strings.Trim(request.QueryParameter("metrics_name"), " ")
func MakeContainerPromQL(nsName, podName, containerName, metricName, containerFilter string) string {
var promql = ""
if containerName == "" {
// all containers maybe use filter
metricType += "_all"
promql = RulePromQLTmplMap[metricType]
metricName += "_all"
promql = RulePromQLTmplMap[metricName]
promql = strings.Replace(promql, "$1", nsName, -1)
promql = strings.Replace(promql, "$2", poName, -1)
containerFilter := strings.Trim(request.QueryParameter("containers_filter"), " ")
promql = strings.Replace(promql, "$2", podName, -1)
if containerFilter == "" {
containerFilter = ".*"
}
promql = strings.Replace(promql, "$3", containerFilter, -1)
return promql
}
promql = RulePromQLTmplMap[metricType]
promql = RulePromQLTmplMap[metricName]
promql = strings.Replace(promql, "$1", nsName, -1)
promql = strings.Replace(promql, "$2", poName, -1)
promql = strings.Replace(promql, "$2", podName, -1)
promql = strings.Replace(promql, "$3", containerName, -1)
return promql
}
func MakePodPromQL(request *restful.Request, params []string) string {
metricType := params[0]
nsName := params[1]
nodeID := params[2]
podName := params[3]
podFilter := params[4]
func MakePodPromQL(metricName, nsName, nodeID, podName, podFilter string) string {
if podFilter == "" {
podFilter = ".*"
}
var promql = ""
if nsName != "" {
// get pod metrics by namespace
if podName != "" {
// specific pod
promql = RulePromQLTmplMap[metricType]
promql = RulePromQLTmplMap[metricName]
promql = strings.Replace(promql, "$1", nsName, -1)
promql = strings.Replace(promql, "$2", podName, -1)
} else {
// all pods
metricType += "_all"
promql = RulePromQLTmplMap[metricType]
if podFilter == "" {
podFilter = ".*"
}
metricName += "_all"
promql = RulePromQLTmplMap[metricName]
promql = strings.Replace(promql, "$1", nsName, -1)
promql = strings.Replace(promql, "$2", podFilter, -1)
}
} else if nodeID != "" {
// get pod metrics by nodeid
metricType += "_node"
promql = RulePromQLTmplMap[metricType]
metricName += "_node"
promql = RulePromQLTmplMap[metricName]
promql = strings.Replace(promql, "$3", nodeID, -1)
if podName != "" {
// specific pod
promql = strings.Replace(promql, "$2", podName, -1)
} else {
// choose pod use re2 expression
podFilter := strings.Trim(request.QueryParameter("pods_filter"), " ")
if podFilter == "" {
podFilter = ".*"
}
promql = strings.Replace(promql, "$2", podFilter, -1)
}
}
return promql
}
func MakeNamespacePromQL(request *restful.Request, metricsName string) string {
nsName := strings.Trim(request.PathParameter("ns_name"), " ")
metricType := metricsName
var recordingRule = RulePromQLTmplMap[metricType]
nsFilter := strings.Trim(request.QueryParameter("namespaces_filter"), " ")
func MakeNamespacePromQL(nsName string, nsFilter string, metricsName string) string {
var recordingRule = RulePromQLTmplMap[metricsName]
if nsName != "" {
nsFilter = nsName
} else {
@@ -162,37 +139,37 @@ func MakeNamespacePromQL(request *restful.Request, metricsName string) string {
return recordingRule
}
func MakeNodeorClusterRule(request *restful.Request, metricsName string) string {
nodeID := request.PathParameter("node_id")
// cluster rule
func MakeClusterRule(metricsName string) string {
var rule = RulePromQLTmplMap[metricsName]
if strings.Contains(request.SelectedRoutePath(), "monitoring/cluster") {
// cluster
return rule
} else {
// node
nodesFilter := strings.Trim(request.QueryParameter("nodes_filter"), " ")
if nodesFilter == "" {
nodesFilter = ".*"
}
if strings.Contains(metricsName, "disk_size") || strings.Contains(metricsName, "pod") || strings.Contains(metricsName, "usage") {
// disk size promql
if nodeID != "" {
nodesFilter = "{" + "node" + "=" + "\"" + nodeID + "\"" + "}"
} else {
nodesFilter = "{" + "node" + "=~" + "\"" + nodesFilter + "\"" + "}"
}
rule = strings.Replace(rule, "$1", nodesFilter, -1)
} else {
// cpu, memory, network, disk_iops rules
if nodeID != "" {
// specific node
rule = rule + "{" + "node" + "=" + "\"" + nodeID + "\"" + "}"
} else {
// all nodes or specific nodes filted with re2 syntax
rule = rule + "{" + "node" + "=~" + "\"" + nodesFilter + "\"" + "}"
}
}
}
return rule
}
// MakeNodeRule builds the PromQL expression for a node-level metric.
//
// A specific node (nodeID) takes precedence over nodesFilter; when both are
// empty, nodesFilter defaults to ".*" (all nodes). Disk-size, pod, usage
// and inode metrics carry a "$1" placeholder inside their template, which
// receives a full label selector; every other template is a bare recording
// rule name to which the selector is appended.
func MakeNodeRule(nodeID string, nodesFilter string, metricsName string) string {
	promql := RulePromQLTmplMap[metricsName]
	if nodesFilter == "" {
		nodesFilter = ".*"
	}
	// Build the node label selector once: exact match for a specific
	// node, RE2 match otherwise.
	var selector string
	if nodeID != "" {
		selector = "{" + "node" + "=" + "\"" + nodeID + "\"" + "}"
	} else {
		selector = "{" + "node" + "=~" + "\"" + nodesFilter + "\"" + "}"
	}
	if strings.Contains(metricsName, "disk_size") || strings.Contains(metricsName, "pod") || strings.Contains(metricsName, "usage") || strings.Contains(metricsName, "inode") {
		// Template embeds "$1" where the selector belongs.
		return strings.Replace(promql, "$1", selector, -1)
	}
	// cpu, memory, network, disk_iops rules: append the selector.
	return promql + selector
}

View File

@@ -0,0 +1,560 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
// Literal keys and values used when parsing and formatting Prometheus
// query responses.
const (
	ResultTypeVector         = "vector" // Prometheus instant-query result type
	ResultTypeMatrix         = "matrix" // Prometheus range-query result type
	MetricStatus             = "status"
	MetricStatusError        = "error"
	MetricStatusSuccess      = "success"
	ResultItemMetric         = "metric"
	ResultItemMetricResource = "resource"
	ResultItemValue          = "value"
	ResultItemValues         = "values"
	ResultSortTypeDesc       = "desc" // sort formatted results descending
	ResultSortTypeAsce       = "asce" // sort formatted results ascending
)
// Identifiers for workspace- and cluster-level statistic metrics exposed
// by this package.
const (
	MetricNameWorkloadCount                 = "workload_count"
	MetricNameNamespacePodCount             = "namespace_pod_count"
	MetricNameWorkspaceAllOrganizationCount = "workspace_all_organization_count"
	MetricNameWorkspaceAllAccountCount      = "workspace_all_account_count"
	MetricNameWorkspaceAllProjectCount      = "workspace_all_project_count"
	MetricNameWorkspaceAllDevopsCount       = "workspace_all_devops_project_count"
	MetricNameClusterAllProjectCount        = "cluster_namespace_count"
	MetricNameWorkspaceNamespaceCount       = "workspace_namespace_count"
	MetricNameWorkspaceDevopsCount          = "workspace_devops_project_count"
	MetricNameWorkspaceMemberCount          = "workspace_member_count"
	MetricNameWorkspaceRoleCount            = "workspace_role_count"
	MetricNameComponentOnLine               = "component_online_count"
	MetricNameComponentLine                 = "component_count" // NOTE(review): name/value mismatch ("Line" vs total count) — confirm intent
)
// Kinds of resources counted at the workspace level.
const (
	WorkspaceResourceKindOrganization = "organization"
	WorkspaceResourceKindAccount      = "account"
	WorkspaceResourceKindNamespace    = "namespace"
	WorkspaceResourceKindDevops       = "devops"
	WorkspaceResourceKindMember       = "member"
	WorkspaceResourceKindRole         = "role"
)
// Levels (scopes) at which metrics can be requested.
const (
	MetricLevelCluster          = "cluster"
	MetricLevelClusterWorkspace = "cluster_workspace"
	MetricLevelNode             = "node"
	MetricLevelWorkspace        = "workspace"
	MetricLevelNamespace        = "namespace"
	MetricLevelPod              = "pod"
	MetricLevelPodName          = "pod_name"
	MetricLevelContainer        = "container"
	MetricLevelWorkload         = "workload"
)
// Kubernetes workload kind names.
const (
	ReplicaSet  = "ReplicaSet"
	StatefulSet = "StatefulSet"
	DaemonSet   = "DaemonSet"
)
// Raw PromQL templates built on kube-state-metrics series.
const (
	// NodeStatusRule selects the Ready condition series for each node.
	NodeStatusRule = `kube_node_status_condition{condition="Ready"} > 0`
	// PodInfoRule selects pods by owner kind ($1), owner name ($2) and
	// namespace ($3).
	// NOTE(review): $2 is not quoted in the template — the caller
	// presumably substitutes a value carrying its own quoting; confirm.
	PodInfoRule        = `kube_pod_info{created_by_kind="$1",created_by_name=$2,namespace="$3"}`
	NamespaceLabelRule = `kube_namespace_labels`
)
const (
	// WorkspaceJoinedKey is the kube-state-metrics label key recording the
	// workspace a namespace belongs to (presumably the sanitized form of
	// the kubesphere.io/workspace label — confirm against exporter config).
	WorkspaceJoinedKey = "label_kubesphere_io_workspace"
)
type MetricMap map[string]string
// ClusterMetricsNames lists every cluster-level metric name that can be
// served from RulePromQLTmplMap.
var ClusterMetricsNames = []string{
	"cluster_cpu_utilisation",
	"cluster_cpu_usage",
	"cluster_cpu_total",
	"cluster_memory_utilisation",
	"cluster_memory_bytes_available",
	"cluster_memory_bytes_total",
	"cluster_memory_bytes_usage",
	"cluster_net_utilisation",
	"cluster_net_bytes_transmitted",
	"cluster_net_bytes_received",
	"cluster_disk_read_iops",
	"cluster_disk_write_iops",
	"cluster_disk_read_throughput",
	"cluster_disk_write_throughput",
	"cluster_disk_size_usage",
	"cluster_disk_size_utilisation",
	"cluster_disk_size_capacity",
	"cluster_disk_size_available",
	"cluster_disk_inode_total",
	"cluster_disk_inode_usage",
	"cluster_disk_inode_utilisation",
	"cluster_node_online",
	"cluster_node_offline",
	"cluster_node_total",
	"cluster_pod_count",
	"cluster_pod_quota",
	"cluster_pod_utilisation",
	"cluster_pod_running_count",
	"cluster_pod_succeeded_count",
	"cluster_pod_abnormal_count",
	"cluster_ingresses_extensions_count",
	"cluster_cronjob_count",
	"cluster_pvc_count",
	"cluster_daemonset_count",
	"cluster_deployment_count",
	"cluster_endpoint_count",
	"cluster_hpa_count",
	"cluster_job_count",
	"cluster_statefulset_count",
	"cluster_replicaset_count",
	"cluster_service_count",
	"cluster_secret_count",
	"cluster_namespace_count",
	"workspace_all_project_count",
}
// NodeMetricsNames lists every node-level metric name that can be served
// from RulePromQLTmplMap.
var NodeMetricsNames = []string{
	"node_cpu_utilisation",
	"node_cpu_total",
	"node_cpu_usage",
	"node_memory_utilisation",
	"node_memory_bytes_usage",
	"node_memory_bytes_available",
	"node_memory_bytes_total",
	"node_net_utilisation",
	"node_net_bytes_transmitted",
	"node_net_bytes_received",
	"node_disk_read_iops",
	"node_disk_write_iops",
	"node_disk_read_throughput",
	"node_disk_write_throughput",
	"node_disk_size_capacity",
	"node_disk_size_available",
	"node_disk_size_usage",
	"node_disk_size_utilisation",
	"node_disk_inode_total",
	"node_disk_inode_usage",
	"node_disk_inode_utilisation",
	"node_pod_count",
	"node_pod_quota",
	"node_pod_utilisation",
	"node_pod_running_count",
	"node_pod_succeeded_count",
	"node_pod_abnormal_count",
}
// WorkspaceMetricsNames lists every workspace-level metric name that can
// be served from RulePromQLTmplMap.
var WorkspaceMetricsNames = []string{
	"workspace_cpu_usage",
	"workspace_memory_usage",
	"workspace_memory_usage_wo_cache",
	"workspace_net_bytes_transmitted",
	"workspace_net_bytes_received",
	"workspace_pod_count",
	"workspace_pod_running_count",
	"workspace_pod_succeeded_count",
	"workspace_pod_abnormal_count",
	"workspace_ingresses_extensions_count",
	"workspace_cronjob_count",
	"workspace_pvc_count",
	"workspace_daemonset_count",
	"workspace_deployment_count",
	"workspace_endpoint_count",
	"workspace_hpa_count",
	"workspace_job_count",
	"workspace_statefulset_count",
	"workspace_replicaset_count",
	"workspace_service_count",
	"workspace_secret_count",
}
// NamespaceMetricsNames lists every namespace-level metric name that can
// be served from RulePromQLTmplMap, including resource-quota "used" and
// "hard" variants.
var NamespaceMetricsNames = []string{
	"namespace_cpu_usage",
	"namespace_memory_usage",
	"namespace_memory_usage_wo_cache",
	"namespace_net_bytes_transmitted",
	"namespace_net_bytes_received",
	"namespace_pod_count",
	"namespace_pod_running_count",
	"namespace_pod_succeeded_count",
	"namespace_pod_abnormal_count",
	"namespace_configmap_count_used",
	"namespace_jobs_batch_count_used",
	"namespace_roles_count_used",
	"namespace_memory_limit_used",
	"namespace_pvc_used",
	"namespace_memory_request_used",
	"namespace_pvc_count_used",
	"namespace_cronjobs_batch_count_used",
	"namespace_ingresses_extensions_count_used",
	"namespace_cpu_limit_used",
	"namespace_storage_request_used",
	"namespace_deployment_count_used",
	"namespace_pod_count_used",
	"namespace_statefulset_count_used",
	"namespace_daemonset_count_used",
	"namespace_secret_count_used",
	"namespace_service_count_used",
	"namespace_cpu_request_used",
	"namespace_service_loadbalancer_used",
	"namespace_configmap_count_hard",
	"namespace_jobs_batch_count_hard",
	"namespace_roles_count_hard",
	"namespace_memory_limit_hard",
	"namespace_pvc_hard",
	"namespace_memory_request_hard",
	"namespace_pvc_count_hard",
	"namespace_cronjobs_batch_count_hard",
	"namespace_ingresses_extensions_count_hard",
	"namespace_cpu_limit_hard",
	"namespace_storage_request_hard",
	"namespace_deployment_count_hard",
	"namespace_pod_count_hard",
	"namespace_statefulset_count_hard",
	"namespace_daemonset_count_hard",
	"namespace_secret_count_hard",
	"namespace_service_count_hard",
	"namespace_cpu_request_hard",
	"namespace_service_loadbalancer_hard",
	"namespace_cronjob_count",
	"namespace_pvc_count",
	"namespace_daemonset_count",
	"namespace_deployment_count",
	"namespace_endpoint_count",
	"namespace_hpa_count",
	"namespace_job_count",
	"namespace_statefulset_count",
	"namespace_replicaset_count",
	"namespace_service_count",
	"namespace_secret_count",
}
// PodMetricsNames lists every pod-level metric name that can be served
// from RulePromQLTmplMap.
var PodMetricsNames = []string{
	"pod_cpu_usage",
	"pod_memory_usage",
	"pod_memory_usage_wo_cache",
	"pod_net_bytes_transmitted",
	"pod_net_bytes_received",
}
// WorkloadMetricsNames lists every workload-level metric name that can be
// served from RulePromQLTmplMap.
var WorkloadMetricsNames = []string{
	"workload_pod_cpu_usage",
	"workload_pod_memory_usage",
	"workload_pod_memory_usage_wo_cache",
	"workload_pod_net_bytes_transmitted",
	"workload_pod_net_bytes_received",
}
var RulePromQLTmplMap = MetricMap{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
"cluster_cpu_usage": `:node_cpu_utilisation:avg1m * sum(node:node_num_cpu:sum)`,
"cluster_cpu_total": "sum(node:node_num_cpu:sum)",
"cluster_memory_utilisation": ":node_memory_utilisation:",
"cluster_memory_bytes_available": "sum(node:node_memory_bytes_available:sum)",
"cluster_memory_bytes_total": "sum(node:node_memory_bytes_total:sum)",
"cluster_memory_bytes_usage": "sum(node:node_memory_bytes_total:sum) - sum(node:node_memory_bytes_available:sum)",
"cluster_net_utilisation": ":node_net_utilisation:sum_irate",
"cluster_net_bytes_transmitted": "sum(node:node_net_bytes_transmitted:sum_irate)",
"cluster_net_bytes_received": "sum(node:node_net_bytes_received:sum_irate)",
"cluster_disk_read_iops": "sum(node:data_volume_iops_reads:sum)",
"cluster_disk_write_iops": "sum(node:data_volume_iops_writes:sum)",
"cluster_disk_read_throughput": "sum(node:data_volume_throughput_bytes_read:sum)",
"cluster_disk_write_throughput": "sum(node:data_volume_throughput_bytes_written:sum)",
"cluster_disk_size_usage": `sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:)) - sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
"cluster_disk_size_utilisation": `(sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:)) - sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))) / sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
"cluster_disk_size_capacity": `sum(sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
"cluster_disk_size_available": `sum(sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:))`,
"cluster_disk_inode_total": `sum(node:disk_inodes_total:)`,
"cluster_disk_inode_usage": `sum(node:disk_inodes_total:) - sum(node:disk_inodes_free:)`,
"cluster_disk_inode_utilisation": `1 - sum(node:disk_inodes_free:) / sum(node:disk_inodes_total:)`,
"cluster_namespace_count": `count(kube_namespace_annotations)`,
"workspace_all_project_count": `count(kube_namespace_annotations)`,
// cluster_pod_count = cluster_pod_running_count + cluster_pod_succeeded_count + cluster_pod_abnormal_count
"cluster_pod_count": `sum(kube_pod_status_phase)`,
"cluster_pod_utilisation": `sum(kube_pod_status_phase) / sum(kube_node_status_capacity_pods)`,
"cluster_pod_running_count": `sum(kube_pod_status_phase{phase="Running"})`,
"cluster_pod_succeeded_count": `sum(kube_pod_status_phase{phase="Succeeded"})`,
"cluster_pod_abnormal_count": `sum(kube_pod_status_phase{phase=~"Failed|Pending|Unknown"})`,
"cluster_node_online": `sum(kube_node_status_condition{condition="Ready",status="true"})`,
"cluster_node_offline": `sum(kube_node_status_condition{condition="Ready",status=~"unknown|false"})`,
"cluster_node_total": `sum(kube_node_status_condition{condition="Ready"})`,
"cluster_pod_quota": `sum(kube_node_status_capacity_pods)`,
"cluster_ingresses_extensions_count": `sum(kube_resourcequota{type="used", resource="count/ingresses.extensions"}) by (resource, type)`,
"cluster_configmap_count_used": `sum(kube_resourcequota{type="used", resource="count/configmaps"}) by (resource, type)`,
"cluster_jobs_batch_count_used": `sum(kube_resourcequota{type="used", resource="count/jobs.batch"}) by (resource, type)`,
"cluster_roles_count_used": `sum(kube_resourcequota{type="used", resource="count/roles.rbac.authorization.k8s.io"}) by (resource, type)`,
"cluster_memory_limit_used": `sum(kube_resourcequota{type="used", resource="limits.memory"}) by (resource, type)`,
"cluster_pvc_used": `sum(kube_resourcequota{type="used", resource="persistentvolumeclaims"}) by (resource, type)`,
"cluster_memory_request_used": `sum(kube_resourcequota{type="used", resource="requests.memory"}) by (resource, type)`,
"cluster_pvc_count_used": `sum(kube_resourcequota{type="used", resource="count/persistentvolumeclaims"}) by (resource, type)`,
"cluster_cronjobs_batch_count_used": `sum(kube_resourcequota{type="used", resource="count/cronjobs.batch"}) by (resource, type)`,
"cluster_ingresses_extensions_count_used": `sum(kube_resourcequota{type="used", resource="count/ingresses.extensions"}) by (resource, type)`,
"cluster_cpu_limit_used": `sum(kube_resourcequota{type="used", resource="limits.cpu"}) by (resource, type)`,
"cluster_storage_request_used": `sum(kube_resourcequota{type="used", resource="requests.storage"}) by (resource, type)`,
"cluster_deployment_count_used": `sum(kube_resourcequota{type="used", resource="count/deployments.apps"}) by (resource, type)`,
"cluster_pod_count_used": `sum(kube_resourcequota{type="used", resource="count/pods"}) by (resource, type)`,
"cluster_statefulset_count_used": `sum(kube_resourcequota{type="used", resource="count/statefulsets.apps"}) by (resource, type)`,
"cluster_daemonset_count_used": `sum(kube_resourcequota{type="used", resource="count/daemonsets.apps"}) by (resource, type)`,
"cluster_secret_count_used": `sum(kube_resourcequota{type="used", resource="count/secrets"}) by (resource, type)`,
"cluster_service_count_used": `sum(kube_resourcequota{type="used", resource="count/services"}) by (resource, type)`,
"cluster_cpu_request_used": `sum(kube_resourcequota{type="used", resource="requests.cpu"}) by (resource, type)`,
"cluster_service_loadbalancer_used": `sum(kube_resourcequota{type="used", resource="services.loadbalancers"}) by (resource, type)`,
"cluster_configmap_count_hard": `sum(kube_resourcequota{type="hard", resource="count/configmaps"}) by (resource, type)`,
"cluster_jobs_batch_count_hard": `sum(kube_resourcequota{type="hard", resource="count/jobs.batch"}) by (resource, type)`,
"cluster_roles_count_hard": `sum(kube_resourcequota{type="hard", resource="count/roles.rbac.authorization.k8s.io"}) by (resource, type)`,
"cluster_memory_limit_hard": `sum(kube_resourcequota{type="hard", resource="limits.memory"}) by (resource, type)`,
"cluster_pvc_hard": `sum(kube_resourcequota{type="hard", resource="persistentvolumeclaims"}) by (resource, type)`,
"cluster_memory_request_hard": `sum(kube_resourcequota{type="hard", resource="requests.memory"}) by (resource, type)`,
"cluster_pvc_count_hard": `sum(kube_resourcequota{type="hard", resource="count/persistentvolumeclaims"}) by (resource, type)`,
"cluster_cronjobs_batch_count_hard": `sum(kube_resourcequota{type="hard", resource="count/cronjobs.batch"}) by (resource, type)`,
"cluster_ingresses_extensions_count_hard": `sum(kube_resourcequota{type="hard", resource="count/ingresses.extensions"}) by (resource, type)`,
"cluster_cpu_limit_hard": `sum(kube_resourcequota{type="hard", resource="limits.cpu"}) by (resource, type)`,
"cluster_storage_request_hard": `sum(kube_resourcequota{type="hard", resource="requests.storage"}) by (resource, type)`,
"cluster_deployment_count_hard": `sum(kube_resourcequota{type="hard", resource="count/deployments.apps"}) by (resource, type)`,
"cluster_pod_count_hard": `sum(kube_resourcequota{type="hard", resource="count/pods"}) by (resource, type)`,
"cluster_statefulset_count_hard": `sum(kube_resourcequota{type="hard", resource="count/statefulsets.apps"}) by (resource, type)`,
"cluster_daemonset_count_hard": `sum(kube_resourcequota{type="hard", resource="count/daemonsets.apps"}) by (resource, type)`,
"cluster_secret_count_hard": `sum(kube_resourcequota{type="hard", resource="count/secrets"}) by (resource, type)`,
"cluster_service_count_hard": `sum(kube_resourcequota{type="hard", resource="count/services"}) by (resource, type)`,
"cluster_cpu_request_hard": `sum(kube_resourcequota{type="hard", resource="requests.cpu"}) by (resource, type)`,
"cluster_service_loadbalancer_hard": `sum(kube_resourcequota{type="hard", resource="services.loadbalancers"}) by (resource, type)`,
"cluster_cronjob_count": `sum(kube_cronjob_labels)`,
"cluster_pvc_count": `sum(kube_persistentvolumeclaim_info)`,
"cluster_daemonset_count": `sum(kube_daemonset_labels)`,
"cluster_deployment_count": `sum(kube_deployment_labels)`,
"cluster_endpoint_count": `sum(kube_endpoint_labels)`,
"cluster_hpa_count": `sum(kube_hpa_labels)`,
"cluster_job_count": `sum(kube_job_labels)`,
"cluster_statefulset_count": `sum(kube_statefulset_labels)`,
"cluster_replicaset_count": `count(kube_replicaset_created)`,
"cluster_service_count": `sum(kube_service_info)`,
"cluster_secret_count": `sum(kube_secret_info)`,
"cluster_pv_count": `sum(kube_persistentvolume_labels)`,
//node
"node_cpu_utilisation": "node:node_cpu_utilisation:avg1m",
"node_cpu_total": "node:node_num_cpu:sum",
"node_memory_utilisation": "node:node_memory_utilisation:",
"node_memory_bytes_available": "node:node_memory_bytes_available:sum",
"node_memory_bytes_total": "node:node_memory_bytes_total:sum",
// Node network utilisation (bytes received + bytes transmitted per second)
"node_net_utilisation": "node:node_net_utilisation:sum_irate",
// Node network bytes transmitted per second
"node_net_bytes_transmitted": "node:node_net_bytes_transmitted:sum_irate",
// Node network bytes received per second
"node_net_bytes_received": "node:node_net_bytes_received:sum_irate",
// node:data_volume_iops_reads:sum{node=~"i-5xcldxos|i-6soe9zl1"}
"node_disk_read_iops": "node:data_volume_iops_reads:sum",
// node:data_volume_iops_writes:sum{node=~"i-5xcldxos|i-6soe9zl1"}
"node_disk_write_iops": "node:data_volume_iops_writes:sum",
// node:data_volume_throughput_bytes_read:sum{node=~"i-5xcldxos|i-6soe9zl1"}
"node_disk_read_throughput": "node:data_volume_throughput_bytes_read:sum",
// node:data_volume_throughput_bytes_written:sum{node=~"i-5xcldxos|i-6soe9zl1"}
"node_disk_write_throughput": "node:data_volume_throughput_bytes_written:sum",
"node_disk_size_capacity": `sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
"node_disk_size_available": `sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
"node_disk_size_usage": `sum by (node) ((node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1) -sum by (node) ((node_filesystem_avail{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
"node_disk_size_utilisation": `sum by (node) (((node_filesystem_size{mountpoint="/", job="node-exporter"} - node_filesystem_avail{mountpoint="/", job="node-exporter"}) / node_filesystem_size{mountpoint="/", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1)`,
"node_disk_inode_total": `node:disk_inodes_total:$1`,
"node_disk_inode_usage": `node:disk_inodes_total:$1 - node:disk_inodes_free:$1`,
"node_disk_inode_utilisation": `(1 - (node:disk_inodes_free:$1 / node:disk_inodes_total:$1))`,
"node_pod_count": `count(kube_pod_info$1) by (node)`,
"node_pod_quota": `sum(kube_node_status_capacity_pods$1) by (node)`,
"node_pod_utilisation": `sum(kube_pod_info$1) by (node) / sum(kube_node_status_capacity_pods$1) by (node)`,
"node_pod_running_count": `count(kube_pod_info$1 unless on (pod) (kube_pod_status_phase{phase=~"Failed|Pending|Unknown|Succeeded"} > 0)) by (node)`,
"node_pod_succeeded_count": `count(kube_pod_info$1 unless on (pod) (kube_pod_status_phase{phase=~"Failed|Pending|Unknown|Running"} > 0)) by (node)`,
"node_pod_abnormal_count": `count(kube_pod_info$1 unless on (pod) (kube_pod_status_phase{phase=~"Succeeded|Running"} > 0)) by (node)`,
// without log node: unless on(node) kube_node_labels{label_role="log"}
"node_cpu_usage": `node:node_cpu_utilisation:avg1m$1 * node:node_num_cpu:sum$1`,
"node_memory_bytes_usage": "node:node_memory_bytes_total:sum$1 - node:node_memory_bytes_available:sum$1",
//namespace
"namespace_cpu_usage": `namespace:container_cpu_usage_seconds_total:sum_rate{namespace=~"$1"}`,
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace=~"$1"}`,
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace=~"$1"}`,
"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m]))`,
"namespace_net_bytes_received": `sum by (namespace) (irate(container_network_receive_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m]))`,
"namespace_pod_count": `sum(kube_pod_status_phase{namespace=~"$1"}) by (namespace)`,
"namespace_pod_running_count": `sum(kube_pod_status_phase{phase="Running", namespace=~"$1"}) by (namespace)`,
"namespace_pod_succeeded_count": `sum(kube_pod_status_phase{phase="Succeeded", namespace=~"$1"}) by (namespace)`,
"namespace_pod_abnormal_count": `sum(kube_pod_status_phase{phase=~"Failed|Pending|Unknown", namespace=~"$1"}) by (namespace)`,
"namespace_configmap_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/configmaps"}) by (namespace, resource, type)`,
"namespace_jobs_batch_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/jobs.batch"}) by (namespace, resource, type)`,
"namespace_roles_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (namespace, resource, type)`,
"namespace_memory_limit_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="limits.memory"}) by (namespace, resource, type)`,
"namespace_pvc_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="persistentvolumeclaims"}) by (namespace, resource, type)`,
"namespace_memory_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.memory"}) by (namespace, resource, type)`,
"namespace_pvc_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/persistentvolumeclaims"}) by (namespace, resource, type)`,
"namespace_cronjobs_batch_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/cronjobs.batch"}) by (namespace, resource, type)`,
"namespace_ingresses_extensions_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/ingresses.extensions"}) by (namespace, resource, type)`,
"namespace_cpu_limit_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="limits.cpu"}) by (namespace, resource, type)`,
"namespace_storage_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.storage"}) by (namespace, resource, type)`,
"namespace_deployment_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/deployments.apps"}) by (namespace, resource, type)`,
"namespace_pod_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/pods"}) by (namespace, resource, type)`,
"namespace_statefulset_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/statefulsets.apps"}) by (namespace, resource, type)`,
"namespace_daemonset_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/daemonsets.apps"}) by (namespace, resource, type)`,
"namespace_secret_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/secrets"}) by (namespace, resource, type)`,
"namespace_service_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/services"}) by (namespace, resource, type)`,
"namespace_cpu_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.cpu"}) by (namespace, resource, type)`,
"namespace_service_loadbalancer_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="services.loadbalancers"}) by (namespace, resource, type)`,
"namespace_configmap_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/configmaps"}) by (namespace, resource, type)`,
"namespace_jobs_batch_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/jobs.batch"}) by (namespace, resource, type)`,
"namespace_roles_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (namespace, resource, type)`,
"namespace_memory_limit_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="limits.memory"}) by (namespace, resource, type)`,
"namespace_pvc_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="persistentvolumeclaims"}) by (namespace, resource, type)`,
"namespace_memory_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.memory"}) by (namespace, resource, type)`,
"namespace_pvc_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/persistentvolumeclaims"}) by (namespace, resource, type)`,
"namespace_cronjobs_batch_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/cronjobs.batch"}) by (namespace, resource, type)`,
"namespace_ingresses_extensions_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/ingresses.extensions"}) by (namespace, resource, type)`,
"namespace_cpu_limit_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="limits.cpu"}) by (namespace, resource, type)`,
"namespace_storage_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.storage"}) by (namespace, resource, type)`,
"namespace_deployment_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/deployments.apps"}) by (namespace, resource, type)`,
"namespace_pod_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/pods"}) by (namespace, resource, type)`,
"namespace_statefulset_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/statefulsets.apps"}) by (namespace, resource, type)`,
"namespace_daemonset_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/daemonsets.apps"}) by (namespace, resource, type)`,
"namespace_secret_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/secrets"}) by (namespace, resource, type)`,
"namespace_service_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/services"}) by (namespace, resource, type)`,
"namespace_cpu_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.cpu"}) by (namespace, resource, type)`,
"namespace_service_loadbalancer_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="services.loadbalancers"}) by (namespace, resource, type)`,
"namespace_cronjob_count": `sum(kube_cronjob_labels{namespace=~"$1"}) by (namespace)`,
"namespace_pvc_count": `sum(kube_persistentvolumeclaim_info{namespace=~"$1"}) by (namespace)`,
"namespace_daemonset_count": `sum(kube_daemonset_labels{namespace=~"$1"}) by (namespace)`,
"namespace_deployment_count": `sum(kube_deployment_labels{namespace=~"$1"}) by (namespace)`,
"namespace_endpoint_count": `sum(kube_endpoint_labels{namespace=~"$1"}) by (namespace)`,
"namespace_hpa_count": `sum(kube_hpa_labels{namespace=~"$1"}) by (namespace)`,
"namespace_job_count": `sum(kube_job_labels{namespace=~"$1"}) by (namespace)`,
"namespace_statefulset_count": `sum(kube_statefulset_labels{namespace=~"$1"}) by (namespace)`,
"namespace_replicaset_count": `count(kube_replicaset_created{namespace=~"$1"}) by (namespace)`,
"namespace_service_count": `sum(kube_service_info{namespace=~"$1"}) by (namespace)`,
"namespace_secret_count": `sum(kube_secret_info{namespace=~"$1"}) by (namespace)`,
// pod
"pod_cpu_usage": `sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name="$2", image!=""}[5m])) by (namespace, pod_name)`,
"pod_memory_usage": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name="$2", image!=""}) by (namespace, pod_name)`,
"pod_memory_usage_wo_cache": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name="$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name="$2",image!=""}) by (namespace, pod_name)`,
"pod_net_bytes_transmitted": `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name="$2", interface="eth0", job="kubelet"}[5m]))`,
"pod_net_bytes_received": `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name="$2", interface="eth0", job="kubelet"}[5m]))`,
"pod_cpu_usage_all": `sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}[5m])) by (namespace, pod_name)`,
"pod_memory_usage_all": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
"pod_memory_usage_wo_cache_all": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name=~"$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
"pod_net_bytes_transmitted_all": `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", interface="eth0", job="kubelet"}[5m]))`,
"pod_net_bytes_received_all": `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", interface="eth0", job="kubelet"}[5m]))`,
"pod_cpu_usage_node": `sum by (node, pod) (label_join(irate(container_cpu_usage_seconds_total{job="kubelet",pod_name=~"$2", image!=""}[5m]), "pod", " ", "pod_name") * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
"pod_memory_usage_node": `sum by (node, pod) (label_join(container_memory_usage_bytes{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name") * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
"pod_memory_usage_wo_cache_node": `sum by (node, pod) ((label_join(container_memory_usage_bytes{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name") - label_join(container_memory_cache{job="kubelet",pod_name=~"$2", image!=""}, "pod", " ", "pod_name")) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{node=~"$3"})`,
// container
"container_cpu_usage": `sum(irate(container_cpu_usage_seconds_total{namespace="$1", pod_name="$2", container_name="$3"}[5m])) by (namespace, pod_name, container_name)`,
"container_cpu_usage_all": `sum(irate(container_cpu_usage_seconds_total{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}[5m])) by (namespace, pod_name, container_name)`,
"container_memory_usage_wo_cache": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name="$3"} - ignoring(id, image, endpoint, instance, job, name, service) container_memory_cache{namespace="$1", pod_name="$2", container_name="$3"}`,
"container_memory_usage_wo_cache_all": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"} - ignoring(id, image, endpoint, instance, job, name, service) container_memory_cache{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}`,
"container_memory_usage": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name="$3"}`,
"container_memory_usage_all": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name=~"$3", container_name!="POD"}`,
// workspace
"workspace_cpu_usage": `sum(namespace:container_cpu_usage_seconds_total:sum_rate{namespace =~"$1"})`,
"workspace_memory_usage": `sum(namespace:container_memory_usage_bytes:sum{namespace =~"$1"})`,
"workspace_memory_usage_wo_cache": `sum(namespace:container_memory_usage_bytes_wo_cache:sum{namespace =~"$1"})`,
"workspace_net_bytes_transmitted": `sum(sum by (namespace) (irate(container_network_transmit_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m])))`,
"workspace_net_bytes_received": `sum(sum by (namespace) (irate(container_network_receive_bytes_total{namespace=~"$1", pod_name!="", interface="eth0", job="kubelet"}[5m])))`,
"workspace_pod_count": `sum(kube_pod_status_phase{namespace=~"$1"})`,
"workspace_pod_running_count": `sum(kube_pod_status_phase{phase="Running", namespace=~"$1"})`,
"workspace_pod_succeeded_count": `sum(kube_pod_status_phase{phase="Succeeded", namespace=~"$1"})`,
"workspace_pod_abnormal_count": `sum(kube_pod_status_phase{phase=~"Failed|Pending|Unknown", namespace=~"$1"})`,
"workspace_configmap_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/configmaps"}) by (resource, type)`,
"workspace_jobs_batch_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/jobs.batch"}) by (resource, type)`,
"workspace_roles_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (resource, type)`,
"workspace_memory_limit_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="limits.memory"}) by (resource, type)`,
"workspace_pvc_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="persistentvolumeclaims"}) by (resource, type)`,
"workspace_memory_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.memory"}) by (resource, type)`,
"workspace_pvc_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/persistentvolumeclaims"}) by (resource, type)`,
"workspace_cronjobs_batch_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/cronjobs.batch"}) by (resource, type)`,
"workspace_ingresses_extensions_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/ingresses.extensions"}) by (resource, type)`,
"workspace_cpu_limit_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="limits.cpu"}) by (resource, type)`,
"workspace_storage_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.storage"}) by (resource, type)`,
"workspace_deployment_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/deployments.apps"}) by (resource, type)`,
"workspace_pod_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/pods"}) by (resource, type)`,
"workspace_statefulset_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/statefulsets.apps"}) by (resource, type)`,
"workspace_daemonset_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/daemonsets.apps"}) by (resource, type)`,
"workspace_secret_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/secrets"}) by (resource, type)`,
"workspace_service_count_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/services"}) by (resource, type)`,
"workspace_cpu_request_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="requests.cpu"}) by (resource, type)`,
"workspace_service_loadbalancer_used": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="services.loadbalancers"}) by (resource, type)`,
"workspace_configmap_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/configmaps"}) by (resource, type)`,
"workspace_jobs_batch_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/jobs.batch"}) by (resource, type)`,
"workspace_roles_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (resource, type)`,
"workspace_memory_limit_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="limits.memory"}) by (resource, type)`,
"workspace_pvc_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="persistentvolumeclaims"}) by (resource, type)`,
"workspace_memory_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.memory"}) by (resource, type)`,
"workspace_pvc_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/persistentvolumeclaims"}) by (resource, type)`,
"workspace_cronjobs_batch_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/cronjobs.batch"}) by (resource, type)`,
"workspace_ingresses_extensions_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/ingresses.extensions"}) by (resource, type)`,
"workspace_cpu_limit_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="limits.cpu"}) by (resource, type)`,
"workspace_storage_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.storage"}) by (resource, type)`,
"workspace_deployment_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/deployments.apps"}) by (resource, type)`,
"workspace_pod_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/pods"}) by (resource, type)`,
"workspace_statefulset_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/statefulsets.apps"}) by (resource, type)`,
"workspace_daemonset_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/daemonsets.apps"}) by (resource, type)`,
"workspace_secret_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/secrets"}) by (resource, type)`,
"workspace_service_count_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="count/services"}) by (resource, type)`,
"workspace_cpu_request_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="requests.cpu"}) by (resource, type)`,
"workspace_service_loadbalancer_hard": `sum(kube_resourcequota{type="hard", namespace=~"$1", resource="services.loadbalancers"}) by (resource, type)`,
"workspace_ingresses_extensions_count": `sum(kube_resourcequota{type="used", namespace=~"$1", resource="count/ingresses.extensions"}) by (resource, type)`,
"workspace_cronjob_count": `sum(kube_cronjob_labels{namespace=~"$1"})`,
"workspace_pvc_count": `sum(kube_persistentvolumeclaim_info{namespace=~"$1"})`,
"workspace_daemonset_count": `sum(kube_daemonset_labels{namespace=~"$1"})`,
"workspace_deployment_count": `sum(kube_deployment_labels{namespace=~"$1"})`,
"workspace_endpoint_count": `sum(kube_endpoint_labels{namespace=~"$1"})`,
"workspace_hpa_count": `sum(kube_hpa_labels{namespace=~"$1"})`,
"workspace_job_count": `sum(kube_job_labels{namespace=~"$1"})`,
"workspace_statefulset_count": `sum(kube_statefulset_labels{namespace=~"$1"})`,
"workspace_replicaset_count": `count(kube_replicaset_created{namespace=~"$1"})`,
"workspace_service_count": `sum(kube_service_info{namespace=~"$1"})`,
"workspace_secret_count": `sum(kube_secret_info{namespace=~"$1"})`,
}

View File

@@ -1,69 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
type FormatedLevelMetric struct {
MetricsLevel string `json:"metrics_level"`
Results []FormatedMetric `json:"results"`
}
type FormatedMetric struct {
MetricName string `json:"metric_name, omitempty"`
Status string `json:"status"`
Data FormatedMetricData `json:"data, omitempty"`
}
type FormatedMetricData struct {
Result []map[string]interface{} `json:"result"`
ResultType string `json:"resultType"`
}
type CommonMetricsResult struct {
Status string `json:"status"`
Data CommonMetricsData `json:"data"`
}
type CommonMetricsData struct {
Result []CommonResultItem `json:"result"`
ResultType string `json:"resultType"`
}
type CommonResultItem struct {
KubePodMetric KubePodMetric `json:"metric"`
Value interface{} `json:"value"`
}
/**
"__name__": "kube_pod_info",
"created_by_kind": "\\u003cnone\\u003e",
"created_by_name": "\\u003cnone\\u003e",
"endpoint": "https-main",
"host_ip": "192.168.0.13",
"instance": "10.244.114.187:8443",
"job": "kube-state-metrics",
"namespace": "kube-system",
"node": "i-39p7faw6",
"pod": "cloud-controller-manager-i-39p7faw6",
"pod_ip": "192.168.0.13",
"service": "kube-state-metrics"
*/
type KubePodMetric struct {
CreatedByKind string `json:"created_by_kind"`
CreatedByName string `json:"created_by_name"`
Namespace string `json:"namespace"`
Pod string `json:"pod"`
}

View File

@@ -0,0 +1,51 @@
package metrics
import (
"net/url"
"strings"
"k8s.io/api/core/v1"
"kubesphere.io/kubesphere/pkg/client"
)
// GetNamespacesWithMetrics decorates each namespace with basic usage metrics
// (cpu usage, memory w/o cache, pod count) stored as annotations keyed by the
// metric name. Namespaces for which no metric data comes back are returned
// unchanged. The input slice is mutated and returned for convenience.
func GetNamespacesWithMetrics(namespaces []*v1.Namespace) []*v1.Namespace {
	nsNameList := make([]string, 0, len(namespaces))
	for i := range namespaces {
		nsNameList = append(nsNameList, namespaces[i].Name)
	}
	// anchor the filter so e.g. "kube" does not also match "kube-system"
	nsFilter := "^(" + strings.Join(nsNameList, "|") + ")$"

	params := client.MonitoringRequestParams{
		NsFilter:      nsFilter,
		Params:        make(url.Values),
		QueryType:     client.DefaultQueryType,
		MetricsFilter: "namespace_cpu_usage|namespace_memory_usage_wo_cache|namespace_pod_count",
	}

	rawMetrics := MonitorAllMetrics(&params, MetricLevelNamespace)

	for _, result := range rawMetrics.Results {
		for _, data := range result.Data.Result {
			metricDescMap, ok := data["metric"].(map[string]interface{})
			if !ok {
				continue
			}
			ns, exist := metricDescMap["namespace"]
			if !exist {
				continue
			}
			// value is a [timestamp, "stringified number"] pair
			timeAndValue, ok := data["value"].([]interface{})
			if !ok || len(timeAndValue) != 2 {
				continue
			}
			// guard the assertion so malformed metric data cannot panic
			// (the previous code asserted timeAndValue[1].(string) unchecked)
			value, ok := timeAndValue[1].(string)
			if !ok {
				continue
			}
			for i := 0; i < len(namespaces); i++ {
				if namespaces[i].Name == ns {
					if namespaces[i].Annotations == nil {
						namespaces[i].Annotations = make(map[string]string)
					}
					namespaces[i].Annotations[result.MetricName] = value
				}
			}
		}
	}
	return namespaces
}

268
pkg/models/metrics/util.go Normal file
View File

@@ -0,0 +1,268 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
	"encoding/json"
	"math"
	"sort"
	"strconv"
	"unicode"
	"unicode/utf8"

	"github.com/golang/glog"
)
const (
	// DefaultPageLimit is the number of result items per page when the
	// "limit" query parameter is absent or not a positive integer.
	DefaultPageLimit = 5
	// DefaultPage is the page returned when the "page" query parameter is
	// present but not a positive integer.
	DefaultPage = 1
)
// FormatedMetricDataWrapper adapts a FormatedMetricData to sort.Interface,
// ordering its result items with the injected comparison function.
type FormatedMetricDataWrapper struct {
	fmtMetricData FormatedMetricData
	// by reports whether item p sorts before item q.
	by func(p, q *map[string]interface{}) bool
}

// Len implements sort.Interface.
func (wrapper FormatedMetricDataWrapper) Len() int {
	return len(wrapper.fmtMetricData.Result)
}

// Less implements sort.Interface by delegating to the injected comparator.
func (wrapper FormatedMetricDataWrapper) Less(i, j int) bool {
	return wrapper.by(&wrapper.fmtMetricData.Result[i], &wrapper.fmtMetricData.Result[j])
}

// Swap implements sort.Interface.
func (wrapper FormatedMetricDataWrapper) Swap(i, j int) {
	wrapper.fmtMetricData.Result[i], wrapper.fmtMetricData.Result[j] = wrapper.fmtMetricData.Result[j], wrapper.fmtMetricData.Result[i]
}
// Sort orders every vector-type metric in fmtLevelMetric so that all metrics
// share the row order of the metric named sortMetricName, sorted by its value
// in ascending or descending order (default: descending). resourceType is the
// label key (e.g. "node", "namespace", "pod") used to align rows across
// metrics. It returns the (in-place modified) metrics plus the length of the
// longest vector result, which callers feed into Page for page counting.
func Sort(sortMetricName string, sortType string, fmtLevelMetric *FormatedLevelMetric, resourceType string) (*FormatedLevelMetric, int) {
	// maxLength is the longest successful vector result — used by Page to
	// compute the total page count.
	var maxLength = 0
	for _, metricItem := range fmtLevelMetric.Results {
		if metricItem.Data.ResultType == ResultTypeVector && metricItem.Status == MetricStatusSuccess {
			if maxLength < len(metricItem.Data.Result) {
				maxLength = len(metricItem.Data.Result)
			}
		}
	}

	// no sort key requested: leave the order untouched
	if sortMetricName == "" {
		return fmtLevelMetric, maxLength
	}

	// default sort type is descending order
	if sortType == "" {
		sortType = ResultSortTypeDesc
	}

	// NOTE(review): currentResourceMap is populated below but never read —
	// presumably leftover from an earlier revision; confirm before removing.
	var currentResourceMap = make(map[string]int)

	// indexMap stores the sorted position for each node/namespace/pod name,
	// so the other metrics can be rearranged into the same row order.
	var indexMap = make(map[string]int)

	for _, metricItem := range fmtLevelMetric.Results {
		if metricItem.Data.ResultType == ResultTypeVector && metricItem.Status == MetricStatusSuccess {
			if metricItem.MetricName == sortMetricName {
				if sortType == ResultSortTypeAsce {
					// ascending by the sample's (last) value
					sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {
						value1 := (*p)[ResultItemValue].([]interface{})
						value2 := (*q)[ResultItemValue].([]interface{})
						v1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)
						v2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)
						return v1 < v2
					}})
				} else {
					// descending by the sample's (last) value
					sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {
						value1 := (*p)[ResultItemValue].([]interface{})
						value2 := (*q)[ResultItemValue].([]interface{})
						v1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)
						v2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)
						return v1 > v2
					}})
				}
				// record the sorted index of each resource name
				i := 0
				for _, r := range metricItem.Data.Result {
					// for some reasons, 'metric' may not contain the `resourceType` field
					// example: {"metric":{},"value":[1541142931.731,"3"]}
					k, exist := r[ResultItemMetric].(map[string]interface{})[resourceType]
					if exist {
						indexMap[k.(string)] = i
						i = i + 1
					}
				}
			}
			// collect every resource name seen across all metrics
			for _, r := range metricItem.Data.Result {
				k, ok := r[ResultItemMetric].(map[string]interface{})[resourceType]
				if ok {
					currentResourceMap[k.(string)] = 1
				}
			}
		}
	}

	// rearrange every other metric into the order recorded in indexMap;
	// rows whose resource is absent from the sort metric go to the tail.
	for i := 0; i < len(fmtLevelMetric.Results); i++ {
		re := fmtLevelMetric.Results[i]
		if re.MetricName != sortMetricName && re.Data.ResultType == ResultTypeVector && re.Status == MetricStatusSuccess {
			sortedMetric := make([]map[string]interface{}, len(indexMap))
			noneSortedMetric := make([]map[string]interface{}, 0)
			for j := 0; j < len(re.Data.Result); j++ {
				r := re.Data.Result[j]
				k, ok := r[ResultItemMetric].(map[string]interface{})[resourceType]
				if ok {
					index, exist := indexMap[k.(string)]
					if exist {
						sortedMetric[index] = r
					} else {
						noneSortedMetric = append(noneSortedMetric, r)
					}
				}
			}
			sortedMetric = append(sortedMetric, noneSortedMetric...)
			fmtLevelMetric.Results[i].Data.Result = sortedMetric
		}
	}

	return fmtLevelMetric, maxLength
}
// Page slices every successful vector-type result in fmtLevelMetric down to
// the requested page and wraps it in a PagedFormatedLevelMetric carrying the
// total page count (derived from maxLength, the longest result reported by
// Sort). If pageNum is empty, or any result is not vector-typed, the input is
// returned unmodified (paging is opt-in). Invalid page/limit values fall back
// to DefaultPage/DefaultPageLimit.
func Page(pageNum string, limitNum string, fmtLevelMetric *FormatedLevelMetric, maxLength int) interface{} {
	// matrix type results cannot be paged
	for _, metricItem := range fmtLevelMetric.Results {
		if metricItem.Data.ResultType != ResultTypeVector {
			return fmtLevelMetric
		}
	}

	// the default mode is no paging at all
	if pageNum == "" {
		return fmtLevelMetric
	}

	page := DefaultPage
	if p, err := strconv.Atoi(pageNum); err != nil {
		glog.Errorln(err)
	} else if p > 0 {
		page = p
	}

	limit := DefaultPageLimit
	if limitNum != "" {
		if l, err := strconv.Atoi(limitNum); err != nil {
			glog.Errorln(err)
		} else if l > 0 {
			limit = l
		}
	}

	// page i covers the half-open index range [(page-1)*limit, page*limit)
	start := (page - 1) * limit
	end := page * limit

	for i := 0; i < len(fmtLevelMetric.Results); i++ {
		// only page when the result type is `vector` and the status is `success`
		if fmtLevelMetric.Results[i].Data.ResultType != ResultTypeVector || fmtLevelMetric.Results[i].Status != MetricStatusSuccess {
			continue
		}
		resultLen := len(fmtLevelMetric.Results[i].Data.Result)
		if start >= resultLen {
			fmtLevelMetric.Results[i].Data.Result = nil
			continue
		}
		// clamp per result: the previous code clamped the shared `end`
		// variable in place, corrupting the range used for every
		// subsequent (longer) result.
		stop := end
		if stop > resultLen {
			stop = resultLen
		}
		fmtLevelMetric.Results[i].Data.Result = fmtLevelMetric.Results[i].Data.Result[start:stop]
	}

	allPage := int(math.Ceil(float64(maxLength) / float64(limit)))

	return &PagedFormatedLevelMetric{
		Message:     "paged",
		TotalPage:   allPage,
		CurrentPage: page,
		Metric:      *fmtLevelMetric,
	}
}
// ReformatJson decodes a raw Prometheus response into a FormatedMetric,
// fills in metricsName when the payload carries no metric name, and strips
// the "__name__" label plus any caller-specified labels from each sample.
// NOTE: this may be somewhat time consuming on large responses.
func ReformatJson(metric string, metricsName string, needDelParams ...string) *FormatedMetric {
	var fmtMetric FormatedMetric
	if err := json.Unmarshal([]byte(metric), &fmtMetric); err != nil {
		glog.Errorln("Unmarshal metric json failed", err)
	}

	if fmtMetric.MetricName == "" && metricsName != "" {
		fmtMetric.MetricName = metricsName
	}

	// nothing to clean up unless the query succeeded
	if fmtMetric.Status != MetricStatusSuccess {
		return &fmtMetric
	}

	for _, res := range fmtMetric.Data.Result {
		item, exist := res[ResultItemMetric]
		labels, sure := item.(map[string]interface{})
		if exist && sure {
			delete(labels, "__name__")
		}
		// delete on a nil map is a harmless no-op, matching the
		// behavior when the labels are missing or mis-typed
		for _, p := range needDelParams {
			delete(labels, p)
		}
	}

	return &fmtMetric
}
// ReformatNodeStatusField upper-cases the first letter of the node status
// label value (e.g. "true" -> "True") in every result item, in place, and
// returns the same pointer for chaining.
func ReformatNodeStatusField(nodeMetric *FormatedMetric) *FormatedMetric {
	for i := range nodeMetric.Data.Result {
		item, exist := nodeMetric.Data.Result[i][ResultItemMetric]
		if !exist {
			continue
		}
		labels := item.(map[string]interface{})
		if status, ok := labels[MetricStatus]; ok {
			labels[MetricStatus] = UpperFirstLetter(status.(string))
		}
	}
	return nodeMetric
}
// UpperFirstLetter returns str with its first rune converted to upper case;
// the remainder of the string is unchanged. An empty string yields "".
func UpperFirstLetter(str string) string {
	if str == "" {
		return ""
	}
	// Decode the first rune by its actual byte width. The previous
	// implementation sliced at str[i+1:], which split multi-byte UTF-8
	// runes and produced invalid strings (e.g. for "über").
	r, size := utf8.DecodeRuneInString(str)
	return string(unicode.ToUpper(r)) + str[size:]
}

View File

@@ -24,6 +24,10 @@ import (
clientV1 "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/util/slice"
"github.com/golang/glog"
"sort"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models/controllers"
@@ -31,6 +35,10 @@ import (
ksErr "kubesphere.io/kubesphere/pkg/util/errors"
)
const (
WorkspaceKey = "kubesphere.io/workspace"
)
var WorkSpaceRoles = []string{"admin", "operator", "viewer"}
func UnBindDevopsProject(workspace string, devops string) error {
@@ -48,6 +56,7 @@ func DeleteDevopsProject(username string, devops string) error {
if err != nil {
return err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
return err
@@ -58,7 +67,7 @@ func DeleteDevopsProject(username string, devops string) error {
return nil
}
func CreateDevopsProject(username string, devops DevopsProject) (*DevopsProject, error) {
func CreateDevopsProject(username string, workspace string, devops DevopsProject) (*DevopsProject, error) {
data, err := json.Marshal(devops)
@@ -75,6 +84,7 @@ func CreateDevopsProject(username string, devops DevopsProject) (*DevopsProject,
return nil, err
}
defer result.Body.Close()
data, err = ioutil.ReadAll(result.Body)
if err != nil {
@@ -93,21 +103,121 @@ func CreateDevopsProject(username string, devops DevopsProject) (*DevopsProject,
return nil, err
}
err = BindingDevopsProject(workspace, *project.ProjectId)
if err != nil {
DeleteDevopsProject(username, *project.ProjectId)
return nil, err
}
go createDefaultDevopsRoleBinding(workspace, project)
return &project, nil
}
func ListNamespaceByUser(workspaceName string, username string) ([]*core.Namespace, error) {
// createDefaultDevopsRoleBinding grants the workspace's admins the
// "maintainer" role and its viewers the "reporter" role on the new project.
func createDefaultDevopsRoleBinding(workspace string, project DevopsProject) {
	for _, user := range iam.GetWorkspaceUsers(workspace, "admin") {
		createDevopsRoleBinding(workspace, *project.ProjectId, user, "maintainer")
	}
	for _, user := range iam.GetWorkspaceUsers(workspace, "viewer") {
		createDevopsRoleBinding(workspace, *project.ProjectId, user, "reporter")
	}
}
// deleteDevopsRoleBinding removes user's membership from the given devops
// project, or — when projectId is empty — from every project bound to the
// workspace. Failures are logged as warnings only (best-effort cleanup).
func deleteDevopsRoleBinding(workspace string, projectId string, user string) {
	projects := make([]string, 0)
	if projectId != "" {
		projects = append(projects, projectId)
	} else {
		p, err := GetDevOpsProjects(workspace)
		if err != nil {
			glog.Warning("delete devops role binding failed", workspace, projectId, user)
			return
		}
		projects = append(projects, p...)
	}

	for _, project := range projects {
		request, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("http://%s/api/v1alpha/projects/%s/members/%s", constants.DevopsAPIServer, project, user), nil)
		request.Header.Add("X-Token-Username", "admin")
		resp, err := http.DefaultClient.Do(request)
		if err != nil || resp.StatusCode > 200 {
			glog.Warning("delete devops role binding failed", workspace, project, user)
		}
		// the previous code never closed the response body, leaking the
		// underlying connection on every call
		if resp != nil {
			resp.Body.Close()
		}
	}
}
// createDevopsRoleBinding grants user the given role on the specified devops
// project, or — when projectId is empty — on every project bound to the
// workspace. Failures are logged as warnings only (best-effort).
func createDevopsRoleBinding(workspace string, projectId string, user string, role string) {
	projects := make([]string, 0)
	if projectId != "" {
		projects = append(projects, projectId)
	} else {
		p, err := GetDevOpsProjects(workspace)
		if err != nil {
			glog.Warning("create devops role binding failed", workspace, projectId, user, role)
			return
		}
		projects = append(projects, p...)
	}

	for _, project := range projects {
		data := []byte(fmt.Sprintf(`{"username":"%s","role":"%s"}`, user, role))
		request, _ := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/api/v1alpha/projects/%s/members", constants.DevopsAPIServer, project), bytes.NewReader(data))
		request.Header.Add("Content-Type", "application/json")
		request.Header.Add("X-Token-Username", "admin")
		resp, err := http.DefaultClient.Do(request)
		if err != nil || resp.StatusCode > 200 {
			glog.Warning(fmt.Sprintf("create devops role binding failed %s,%s,%s,%s", workspace, project, user, role))
		}
		// the previous code never closed the response body, leaking the
		// underlying connection on every call
		if resp != nil {
			resp.Body.Close()
		}
	}
}
func ListNamespaceByUser(workspaceName string, username string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []*core.Namespace, error) {
namespaces, err := Namespaces(workspaceName)
if err != nil {
return nil, err
return 0, nil, err
}
if keyword != "" {
for i := 0; i < len(namespaces); i++ {
if !strings.Contains(namespaces[i].Name, keyword) {
namespaces = append(namespaces[:i], namespaces[i+1:]...)
i--
}
}
}
sort.Slice(namespaces, func(i, j int) bool {
switch orderBy {
case "name":
if reverse {
return namespaces[i].Name < namespaces[j].Name
} else {
return namespaces[i].Name > namespaces[j].Name
}
default:
if reverse {
return namespaces[i].CreationTimestamp.Time.After(namespaces[j].CreationTimestamp.Time)
} else {
return namespaces[i].CreationTimestamp.Time.Before(namespaces[j].CreationTimestamp.Time)
}
}
})
clusterRoles, err := iam.GetClusterRoles(username)
if err != nil {
return nil, err
return 0, nil, err
}
rules := make([]v1.PolicyRule, 0)
@@ -118,13 +228,11 @@ func ListNamespaceByUser(workspaceName string, username string) ([]*core.Namespa
namespacesManager := v1.PolicyRule{APIGroups: []string{"kubesphere.io"}, ResourceNames: []string{workspaceName}, Verbs: []string{"get"}, Resources: []string{"workspaces/namespaces"}}
if iam.RulesMatchesRequired(rules, namespacesManager) {
return namespaces, nil
} else {
if !iam.RulesMatchesRequired(rules, namespacesManager) {
for i := 0; i < len(namespaces); i++ {
roles, err := iam.GetRoles(namespaces[i].Name, username)
if err != nil {
return nil, err
return 0, nil, err
}
rules := make([]v1.PolicyRule, 0)
for _, role := range roles {
@@ -137,7 +245,13 @@ func ListNamespaceByUser(workspaceName string, username string) ([]*core.Namespa
}
}
return namespaces, nil
if len(namespaces) < offset {
return len(namespaces), namespaces, nil
} else if len(namespaces) < limit+offset {
return len(namespaces), namespaces[offset:], nil
} else {
return len(namespaces), namespaces[offset : limit+offset], nil
}
}
func Namespaces(workspaceName string) ([]*core.Namespace, error) {
@@ -196,6 +310,7 @@ func Delete(workspace *Workspace) error {
return err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -219,7 +334,7 @@ func release(workspace *Workspace) error {
for _, devops := range workspace.DevopsProjects {
err := DeleteDevopsProject(workspace.Creator, devops)
if err != nil {
if err != nil && !strings.Contains(err.Error(), "not found") {
return err
}
}
@@ -264,7 +379,7 @@ func Create(workspace *Workspace) (*Workspace, error) {
if err != nil {
return nil, err
}
defer result.Body.Close()
data, err = ioutil.ReadAll(result.Body)
if err != nil {
@@ -313,6 +428,7 @@ func Edit(workspace *Workspace) (*Workspace, error) {
return nil, err
}
defer result.Body.Close()
data, err = ioutil.ReadAll(result.Body)
if err != nil {
@@ -342,6 +458,7 @@ func Detail(name string) (*Workspace, error) {
return nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -373,7 +490,7 @@ func Detail(name string) (*Workspace, error) {
}
// List all workspaces for the current user
func ListByUser(username string) ([]*Workspace, error) {
func ListWorkspaceByUser(username string, keyword string) ([]*Workspace, error) {
clusterRoles, err := iam.GetClusterRoles(username)
@@ -389,24 +506,31 @@ func ListByUser(username string) ([]*Workspace, error) {
workspacesManager := v1.PolicyRule{APIGroups: []string{"kubesphere.io"}, Verbs: []string{"list", "get"}, Resources: []string{"workspaces"}}
var workspaces []*Workspace
if iam.RulesMatchesRequired(rules, workspacesManager) {
return fetch(nil)
workspaces, err = fetch(nil)
} else {
workspaceNames := make([]string, 0)
for _, clusterRole := range clusterRoles {
if regexp.MustCompile("^system:\\w+:(admin|operator|viewer)$").MatchString(clusterRole.Name) {
arr := strings.Split(clusterRole.Name, ":")
workspaceNames = append(workspaceNames, arr[1])
if groups := regexp.MustCompile(`^system:(\w+):(admin|operator|viewer)$`).FindStringSubmatch(clusterRole.Name); len(groups) == 3 {
if !slice.ContainsString(workspaceNames, groups[1], nil) {
workspaceNames = append(workspaceNames, groups[1])
}
}
}
if len(workspaceNames) == 0 {
return make([]*Workspace, 0), nil
}
return fetch(workspaceNames)
workspaces, err = fetch(workspaceNames)
}
if keyword != "" {
for i := 0; i < len(workspaces); i++ {
if !strings.Contains(workspaces[i].Name, keyword) {
workspaces = append(workspaces[:i], workspaces[i+1:]...)
i--
}
}
}
return workspaces, err
}
func fetch(names []string) ([]*Workspace, error) {
@@ -414,7 +538,11 @@ func fetch(names []string) ([]*Workspace, error) {
url := fmt.Sprintf("http://%s/apis/account.kubesphere.io/v1alpha1/groups", constants.AccountAPIServer)
if names != nil {
url = url + "?path=" + strings.Join(names, ",")
if len(names) == 0 {
return make([]*Workspace, 0), nil
} else {
url = url + "?path=" + strings.Join(names, ",")
}
}
result, err := http.Get(url)
@@ -423,6 +551,7 @@ func fetch(names []string) ([]*Workspace, error) {
return nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -457,7 +586,7 @@ func fetch(names []string) ([]*Workspace, error) {
return workspaces, nil
}
func DevopsProjects(workspace string) ([]DevopsProject, error) {
func ListDevopsProjectsByUser(username string, workspace string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []DevopsProject, error) {
db := client.NewSharedDBClient()
defer db.Close()
@@ -465,48 +594,89 @@ func DevopsProjects(workspace string) ([]DevopsProject, error) {
var workspaceDOPBindings []WorkspaceDPBinding
if err := db.Where("workspace = ?", workspace).Find(&workspaceDOPBindings).Error; err != nil {
return nil, err
return 0, nil, err
}
devOpsProjects := make([]DevopsProject, 0)
for _, workspaceDOPBinding := range workspaceDOPBindings {
request, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects/%s", constants.DevopsAPIServer, workspaceDOPBinding.DevOpsProject), nil)
request.Header.Add("X-Token-Username", "admin")
request, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects", constants.DevopsAPIServer), nil)
request.Header.Add("X-Token-Username", username)
result, err := http.DefaultClient.Do(request)
if err != nil {
return nil, err
}
data, err := ioutil.ReadAll(result.Body)
result, err := http.DefaultClient.Do(request)
if err != nil {
return 0, nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
return nil, err
}
if result.StatusCode == 403 || result.StatusCode == 404 {
if err := db.Delete(&workspaceDOPBinding).Error; err != nil {
return nil, err
}
continue
}
if result.StatusCode > 200 {
return nil, ksErr.Wrap(data)
}
var project DevopsProject
err = json.Unmarshal(data, &project)
if err != nil {
return nil, err
}
devOpsProjects = append(devOpsProjects, project)
if err != nil {
return 0, nil, err
}
return devOpsProjects, nil
//if result.StatusCode == 403 || result.StatusCode == 404 {
// if err := db.Delete(&workspaceDOPBinding).Error; err != nil {
// return nil, err
// }
// continue
//}
if result.StatusCode > 200 {
return 0, nil, ksErr.Wrap(data)
}
err = json.Unmarshal(data, &devOpsProjects)
if err != nil {
return 0, nil, err
}
if keyword != "" {
for i := 0; i < len(devOpsProjects); i++ {
if !strings.Contains(devOpsProjects[i].Name, keyword) {
devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...)
i--
}
}
}
sort.Slice(devOpsProjects, func(i, j int) bool {
switch orderBy {
case "name":
if reverse {
return devOpsProjects[i].Name < devOpsProjects[j].Name
} else {
return devOpsProjects[i].Name > devOpsProjects[j].Name
}
default:
if reverse {
return devOpsProjects[i].CreateTime.After(*devOpsProjects[j].CreateTime)
} else {
return devOpsProjects[i].CreateTime.Before(*devOpsProjects[j].CreateTime)
}
}
})
for i := 0; i < len(devOpsProjects); i++ {
inWorkspace := false
for _, binding := range workspaceDOPBindings {
if binding.DevOpsProject == *devOpsProjects[i].ProjectId {
inWorkspace = true
}
}
if !inWorkspace {
devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...)
i--
}
}
if len(devOpsProjects) < offset {
return len(devOpsProjects), devOpsProjects, nil
} else if len(devOpsProjects) < limit+offset {
return len(devOpsProjects), devOpsProjects[offset:], nil
} else {
return len(devOpsProjects), devOpsProjects[offset : limit+offset], nil
}
}
func convertGroupToWorkspace(db *gorm.DB, group Group) (*Workspace, error) {
namespaces, err := Namespaces(group.Name)
@@ -578,6 +748,19 @@ func Invite(workspaceName string, users []UserInvite) error {
return nil
}
// NamespaceExistCheck reports whether a namespace with the given name
// exists in the cluster. A NotFound response from the API server is an
// expected outcome and yields (false, nil); any other error from the
// lookup is returned to the caller.
func NamespaceExistCheck(namespaceName string) (bool, error) {
	_, err := client.NewK8sClient().CoreV1().Namespaces().Get(namespaceName, meta_v1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			// Absent namespace is not a failure condition.
			return false, nil
		}
		return false, err
	}
	return true, nil
}
func RemoveMembers(workspaceName string, users []string) error {
workspace, err := Detail(workspaceName)
@@ -638,6 +821,7 @@ func GetWorkspaceMembers(workspace string) ([]iam.User, error) {
return nil, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
@@ -667,133 +851,33 @@ func WorkspaceRoleInit(workspace *Workspace) error {
admin.Name = fmt.Sprintf("system:%s:admin", workspace.Name)
admin.Kind = iam.ClusterRoleKind
admin.Rules = []v1.PolicyRule{
// apis/kubesphere.io/v1alpha1/workspaces/sample
// apis/kubesphere.io/v1alpha1/workspaces/sample/namespaces
// apis/kubesphere.io/v1alpha1/workspaces/sample/devops
// apis/kubesphere.io/v1alpha1/workspaces/sample/roles
// apis/kubesphere.io/v1alpha1/workspaces/sample/members
// apis/kubesphere.io/v1alpha1/workspaces/sample/members/admin
{
Verbs: []string{"*"},
APIGroups: []string{"kubesphere.io", "account.kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces", "workspaces/*"},
},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/namespaces
{
Verbs: []string{"create"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces/namespaces"},
},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/members
{
Verbs: []string{"create"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces/members"},
},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/devops
{
Verbs: []string{"create"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces/devops"},
},
// TODO have risks
// get apis/apps/v1/namespaces/proj1/deployments/?labelSelector
// post api/v1/namespaces/project-0vya57/limitranges
{
Verbs: []string{"*"},
APIGroups: []string{"", "apps", "extensions", "batch"},
Resources: []string{"limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumes", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", "pods", "events", "deployments/scale"},
APIGroups: []string{"devops.kubesphere.io", "jenkins.kubesphere.io"},
Resources: []string{"*"},
},
// get apis/kubesphere.io/v1alpha1/quota/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"quota/*"},
},
// get api/v1/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"namespaces", "serviceaccounts", "configmaps"},
},
// get api/v1/namespaces/proj1/serviceaccounts
// get api/v1/namespaces/proj1/configmaps
// get api/v1/namespaces/proj1/secrets
{
Verbs: []string{"list"},
APIGroups: []string{""},
Resources: []string{"serviceaccounts", "configmaps", "secrets"},
},
// get apis/kubesphere.io/v1alpha1/status/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"namespaces"},
Resources: []string{"status/*"},
Resources: []string{"status/*", "monitoring/*", "quota/*"},
},
// apis/kubesphere.io/v1alpha1/namespaces/proj1/router
{
Verbs: []string{"list"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"router"},
},
// get apis/kubesphere.io/v1alpha1/registries/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"registries"},
},
// get apis/kubesphere.io/v1alpha1/monitoring/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"namespaces"},
Resources: []string{"monitoring/*"},
},
// get apis/kubesphere.io/v1alpha1/resources/persistent-volume-claims
// get apis/kubesphere.io/v1alpha1/resources/deployments
// get apis/kubesphere.io/v1alpha1/resources/statefulsets
// get apis/kubesphere.io/v1alpha1/resources/daemonsets
// get apis/kubesphere.io/v1alpha1/resources/jobs
// get apis/kubesphere.io/v1alpha1/resources/cronjobs
// get apis/kubesphere.io/v1alpha1/resources/persistent-volume-claims
// get apis/kubesphere.io/v1alpha1/resources/services
// get apis/kubesphere.io/v1alpha1/resources/ingresses
// get apis/kubesphere.io/v1alpha1/resources/secrets
// get apis/kubesphere.io/v1alpha1/resources/configmaps
// get apis/kubesphere.io/v1alpha1/resources/roles
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"resources"},
},
// apis/account.kubesphere.io/v1alpha1/users
// apis/account.kubesphere.io/v1alpha1/namespaces/proj1/users
{
Verbs: []string{"list"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"users"},
},
// apis/kubesphere.io/v1alpha1/monitoring/workspaces/sample?metrics_filter=
// apis/kubesphere.io/v1alpha1/monitoring/workspaces/sample/pods?step=30m
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
@@ -814,15 +898,42 @@ func WorkspaceRoleInit(workspace *Workspace) error {
Resources: []string{"workspaces"},
ResourceNames: []string{workspace.Name},
}, {
Verbs: []string{"create", "get"},
Verbs: []string{"create"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/namespaces", "workspaces/devops"},
ResourceNames: []string{workspace.Name},
},
{
Verbs: []string{"delete"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"workspaces/namespaces", "workspaces/devops"},
ResourceNames: []string{workspace.Name},
},
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"namespaces"},
Resources: []string{"quota/*", "status/*", "monitoring/*"},
},
{
Verbs: []string{"*"},
APIGroups: []string{"devops.kubesphere.io"},
Resources: []string{"*"},
}, {
Verbs: []string{"*"},
APIGroups: []string{"jenkins.kubesphere.io"},
Resources: []string{"*"},
},
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"registries"},
Resources: []string{"resources"},
},
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces/members"},
},
}
@@ -832,139 +943,38 @@ func WorkspaceRoleInit(workspace *Workspace) error {
viewer.Name = fmt.Sprintf("system:%s:viewer", workspace.Name)
viewer.Kind = iam.ClusterRoleKind
viewer.Rules = []v1.PolicyRule{
// apis/kubesphere.io/v1alpha1/workspaces/sample
// apis/kubesphere.io/v1alpha1/workspaces/sample/namespaces
// apis/kubesphere.io/v1alpha1/workspaces/sample/devops
// apis/kubesphere.io/v1alpha1/workspaces/sample/roles
// apis/kubesphere.io/v1alpha1/workspaces/sample/members
// apis/kubesphere.io/v1alpha1/workspaces/sample/members/admin
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io", "account.kubesphere.io"},
ResourceNames: []string{workspace.Name},
Resources: []string{"workspaces", "workspaces/*"},
},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/namespaces
//{
// Verbs: []string{"create"},
// APIGroups: []string{"kubesphere.io"},
// ResourceNames: []string{workspace.Name},
// Resources: []string{"workspaces/namespaces"},
//},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/members
//{
// Verbs: []string{"create"},
// APIGroups: []string{"kubesphere.io"},
// ResourceNames: []string{workspace.Name},
// Resources: []string{"workspaces/members"},
//},
// post apis/kubesphere.io/v1alpha1/workspaces/sample/devops
//{
// Verbs: []string{"create"},
// APIGroups: []string{"kubesphere.io"},
// ResourceNames: []string{workspace.Name},
// Resources: []string{"workspaces/devops"},
//},
// TODO have risks
// get apis/apps/v1/namespaces/proj1/deployments/?labelSelector
// post api/v1/namespaces/project-0vya57/limitranges
{
Verbs: []string{"get", "list"},
APIGroups: []string{"", "apps", "extensions", "batch"},
Resources: []string{"limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumes", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", "pods", "events", "deployments/scale"},
},
// get apis/kubesphere.io/v1alpha1/quota/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"quota/*"},
},
// get api/v1/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{""},
Resources: []string{"namespaces", "serviceaccounts", "configmaps"},
},
// get api/v1/namespaces/proj1/serviceaccounts
// get api/v1/namespaces/proj1/configmaps
// get api/v1/namespaces/proj1/secrets
{
Verbs: []string{"list"},
APIGroups: []string{""},
Resources: []string{"serviceaccounts", "configmaps", "secrets"},
},
// get apis/kubesphere.io/v1alpha1/status/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"namespaces"},
Resources: []string{"status/*"},
Resources: []string{"quota/*", "status/*", "monitoring/*"},
},
// apis/kubesphere.io/v1alpha1/namespaces/proj1/router
{
Verbs: []string{"list"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"router"},
},
// get apis/kubesphere.io/v1alpha1/registries/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"registries"},
},
// get apis/kubesphere.io/v1alpha1/monitoring/namespaces/proj1
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"namespaces"},
Resources: []string{"monitoring/*"},
},
// get apis/kubesphere.io/v1alpha1/resources/persistent-volume-claims
// get apis/kubesphere.io/v1alpha1/resources/deployments
// get apis/kubesphere.io/v1alpha1/resources/statefulsets
// get apis/kubesphere.io/v1alpha1/resources/daemonsets
// get apis/kubesphere.io/v1alpha1/resources/jobs
// get apis/kubesphere.io/v1alpha1/resources/cronjobs
// get apis/kubesphere.io/v1alpha1/resources/persistent-volume-claims
// get apis/kubesphere.io/v1alpha1/resources/services
// get apis/kubesphere.io/v1alpha1/resources/ingresses
// get apis/kubesphere.io/v1alpha1/resources/secrets
// get apis/kubesphere.io/v1alpha1/resources/configmaps
// get apis/kubesphere.io/v1alpha1/resources/roles
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
Resources: []string{"resources"},
},
// apis/account.kubesphere.io/v1alpha1/users
// apis/account.kubesphere.io/v1alpha1/namespaces/proj1/users
{
Verbs: []string{"list"},
APIGroups: []string{"account.kubesphere.io"},
Resources: []string{"users"},
},
// apis/kubesphere.io/v1alpha1/monitoring/workspaces/sample?metrics_filter=
// apis/kubesphere.io/v1alpha1/monitoring/workspaces/sample/pods?step=30m
{
Verbs: []string{"get"},
APIGroups: []string{"kubesphere.io"},
ResourceNames: []string{"workspaces"},
Resources: []string{"monitoring/" + workspace.Name},
},
{
Verbs: []string{"get", "list"},
APIGroups: []string{"devops.kubesphere.io"},
Resources: []string{"*"},
}, {
Verbs: []string{"get", "list"},
APIGroups: []string{"jenkins.kubesphere.io"},
Resources: []string{"*"},
},
}
viewer.Labels = map[string]string{"creator": "system"}
@@ -1135,8 +1145,8 @@ func CreateWorkspaceRoleBinding(workspace *Workspace, username string, role stri
} else {
modify = true
roleBinding.Subjects = append(roleBinding.Subjects[:i], roleBinding.Subjects[i+1:]...)
if err != nil {
return err
if roleName == "admin" || roleName == "viewer" {
go deleteDevopsRoleBinding(workspace.Name, "", username)
}
break
}
@@ -1146,6 +1156,11 @@ func CreateWorkspaceRoleBinding(workspace *Workspace, username string, role stri
if roleName == role {
modify = true
roleBinding.Subjects = append(roleBinding.Subjects, v1.Subject{Kind: v1.UserKind, Name: username})
if roleName == "admin" {
go createDevopsRoleBinding(workspace.Name, "", username, "maintainer")
} else if roleName == "viewer" {
go createDevopsRoleBinding(workspace.Name, "", username, "reporter")
}
}
if !modify {
@@ -1161,14 +1176,14 @@ func CreateWorkspaceRoleBinding(workspace *Workspace, username string, role stri
return nil
}
func GetDevOpsProjects(name string) ([]string, error) {
func GetDevOpsProjects(workspaceName string) ([]string, error) {
db := client.NewSharedDBClient()
defer db.Close()
var workspaceDOPBindings []WorkspaceDPBinding
if err := db.Where("workspace = ?", name).Find(&workspaceDOPBindings).Error; err != nil {
if err := db.Where("workspace = ?", workspaceName).Find(&workspaceDOPBindings).Error; err != nil {
return nil, err
}
@@ -1215,6 +1230,7 @@ func CountAll() (int, error) {
return 0, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
return 0, err
@@ -1229,7 +1245,7 @@ func CountAll() (int, error) {
if err != nil {
return 0, err
}
val, ok := count["total"]
val, ok := count["total_count"]
if !ok {
return 0, errors.New("not found")
@@ -1264,7 +1280,7 @@ func GetAllDevOpsProjectsNums() (int, error) {
defer db.Close()
var count int
if err := db.Find(&WorkspaceDPBinding{}).Count(&count).Error; err != nil {
if err := db.Model(&WorkspaceDPBinding{}).Count(&count).Error; err != nil {
return 0, err
}
return count, nil
@@ -1277,6 +1293,7 @@ func GetAllAccountNums() (int, error) {
return 0, err
}
defer result.Body.Close()
data, err := ioutil.ReadAll(result.Body)
if err != nil {
return 0, err
@@ -1291,7 +1308,7 @@ func GetAllAccountNums() (int, error) {
if err != nil {
return 0, err
}
val, ok := count["total"]
val, ok := count["total_count"]
if !ok {
return 0, errors.New("not found")
@@ -1307,3 +1324,37 @@ func GetAllAccountNums() (int, error) {
}
return 0, errors.New("not found")
}
// GetAllOrgAndProjList lists every namespace in the cluster and builds two
// lookup tables:
//   - workspace name -> names of the namespaces labeled as belonging to it
//     (only workspaces that own at least one namespace appear as keys);
//   - namespace name -> owning workspace name, where "" marks a namespace
//     that carries no workspace label.
//
// Any error from the namespace list call is logged and returned with nil maps.
func GetAllOrgAndProjList() (map[string][]string, map[string]string, error) {
	nsList, err := client.NewK8sClient().CoreV1().Namespaces().List(meta_v1.ListOptions{})
	if err != nil {
		glog.Errorln(err)
		return nil, nil, err
	}
	workspaceNamespaceMap := make(map[string][]string)
	namespaceWorkspaceMap := make(map[string]string)
	for _, item := range nsList.Items {
		ns := item.Name
		if ws, exist := item.Labels[WorkspaceKey]; exist {
			// append on the nil slice returned for an absent key is valid,
			// so no explicit presence check is needed.
			workspaceNamespaceMap[ws] = append(workspaceNamespaceMap[ws], ns)
			namespaceWorkspaceMap[ns] = ws
		} else {
			// This namespace does not belong to any workspace.
			namespaceWorkspaceMap[ns] = ""
		}
	}
	return workspaceNamespaceMap, namespaceWorkspaceMap, nil
}