monitor: add tests

Signed-off-by: huanggze <loganhuang@yunify.com>
This commit is contained in:
zryfish
2020-04-01 17:41:50 +08:00
committed by huanggze
parent 17013d3519
commit 372a52e70e
63 changed files with 5405 additions and 4462 deletions

View File

@@ -28,7 +28,7 @@ import (
configv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/config/v1alpha2"
iamv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2"
monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha2"
monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3"
"kubesphere.io/kubesphere/pkg/kapis/oauth"
openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1"
operationsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/operations/v1alpha2"
@@ -138,7 +138,7 @@ func (s *APIServer) installKubeSphereAPIs() {
// Need to refactor devops api registration, too much dependencies
//urlruntime.Must(devopsv1alpha2.AddToContainer(s.container, s.DevopsClient, s.DBClient.Database(), nil, s.KubernetesClient.KubeSphere(), s.InformerFactory.KubeSphereSharedInformerFactory(), s.S3Client))
urlruntime.Must(loggingv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.LoggingClient))
urlruntime.Must(monitoringv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.MonitoringClient))
urlruntime.Must(monitoringv1alpha3.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.MonitoringClient))
urlruntime.Must(openpitrixv1.AddToContainer(s.container, s.InformerFactory, s.OpenpitrixClient))
urlruntime.Must(operationsv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes()))
urlruntime.Must(resourcesv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.InformerFactory))

View File

@@ -1,145 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"github.com/emicklei/go-restful"
"kubesphere.io/kubesphere/pkg/api"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
)
// handler serves the v1alpha2 monitoring endpoints. It carries a Kubernetes
// client (used to look up namespace metadata when validating query windows)
// and the monitoring operator that executes the actual metric queries.
type handler struct {
	k  k8s.Client
	mo model.MonitoringOperator
}

// newHandler wraps the given clients into a request handler.
func newHandler(k k8s.Client, m monitoring.Interface) *handler {
	return &handler{k, model.NewMonitoringOperator(m)}
}
// handleClusterMetricsQuery answers cluster-level metric queries.
func (h handler) handleClusterMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelCluster); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleNodeMetricsQuery answers node-level metric queries.
func (h handler) handleNodeMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelNode); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleWorkspaceMetricsQuery answers workspace-level metric queries.
func (h handler) handleWorkspaceMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelWorkspace); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleNamespaceMetricsQuery answers namespace-level metric queries.
func (h handler) handleNamespaceMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelNamespace); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleWorkloadMetricsQuery answers workload-level metric queries.
func (h handler) handleWorkloadMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelWorkload); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handlePodMetricsQuery answers pod-level metric queries.
func (h handler) handlePodMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelPod); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleContainerMetricsQuery answers container-level metric queries.
func (h handler) handleContainerMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelContainer); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handlePVCMetricsQuery answers persistent-volume-claim-level metric queries.
func (h handler) handlePVCMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelPVC); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}

// handleComponentMetricsQuery answers control-plane component metric queries.
func (h handler) handleComponentMetricsQuery(req *restful.Request, resp *restful.Response) {
	if p, err := h.parseRequestParams(req, monitoring.LevelComponent); err != nil {
		api.HandleBadRequest(resp, nil, err)
	} else {
		h.handleNamedMetricsQuery(resp, p)
	}
}
// handleNamedMetricsQuery runs the query described by p and writes the result
// as JSON. Range queries return time series; instant queries may additionally
// be sorted and paginated when a sort target was requested.
func (h handler) handleNamedMetricsQuery(resp *restful.Response, p params) {
	var (
		res v1alpha2.APIResponse
		err error
	)
	switch {
	case p.isRangeQuery():
		res, err = h.mo.GetNamedMetricsOverTime(p.start, p.end, p.step, p.option)
		if err != nil {
			api.HandleInternalError(resp, nil, err)
			return
		}
	default:
		res, err = h.mo.GetNamedMetrics(p.time, p.option)
		if err != nil {
			api.HandleInternalError(resp, nil, err)
			return
		}
		if p.shouldSort() {
			sorted, rows := h.mo.SortMetrics(res, p.target, p.order, p.identifier)
			res = h.mo.PageMetrics(sorted, p.page, p.limit, rows)
		}
	}
	resp.WriteAsJson(res)
}

View File

@@ -1,217 +0,0 @@
package v1alpha2
import (
"fmt"
"github.com/emicklei/go-restful"
"github.com/pkg/errors"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"strconv"
"time"
)
const (
	DefaultStep   = 10 * time.Minute // range-query resolution when 'step' is omitted
	DefaultFilter = ".*"             // match-all filter for metrics/resources
	DefaultOrder  = model.OrderDescending
	DefaultPage   = 1
	DefaultLimit  = 5
)

// params is the fully parsed form of a monitoring query request.
type params struct {
	time       time.Time // instant-query timestamp (zero when start/end are used)
	start, end time.Time // range-query window (zero for instant queries)
	step       time.Duration
	target     string // metric name to sort by; empty disables sorting
	identifier string // label used to identify resources when sorting
	order      string
	page       int
	limit      int
	option     monitoring.QueryOption // level-specific query option
}
// isRangeQuery reports whether p describes a range query over [start, end].
// parseRequestParams always sets p.time for instant queries (defaulting to
// time.Now()) and leaves it zero for range queries, so a zero time means
// range. The previous implementation returned !p.time.IsZero(), which
// inverted the check and sent instant queries down the range path with a
// zero start/end window.
func (p params) isRangeQuery() bool {
	return p.time.IsZero()
}
// shouldSort reports whether the client requested sorted output by
// providing a 'sort_metric' target.
func (p params) shouldSort() bool {
	return len(p.target) > 0
}
// parseRequestParams extracts query and path parameters from the request and
// assembles the params needed to run a monitoring query at the given level.
//
// Validation rules:
//   - 'time' (instant query) and the 'start'/'end' pair (range query) are
//     mutually exclusive; with neither given, an instant query at now is used.
//   - when a 'namespace' query parameter is present, the query window is
//     clamped to the namespace creation time so metrics left over from a
//     deleted namespace of the same name stay hidden.
//   - sorting ('sort_metric') implies pagination with defaults page=1, limit=5.
func (h handler) parseRequestParams(req *restful.Request, lvl monitoring.MonitoringLevel) (params, error) {
	timestamp := req.QueryParameter("time")
	start := req.QueryParameter("start")
	end := req.QueryParameter("end")
	step := req.QueryParameter("step")
	target := req.QueryParameter("sort_metric")
	order := req.QueryParameter("sort_type")
	page := req.QueryParameter("page")
	limit := req.QueryParameter("limit")
	metricFilter := req.QueryParameter("metrics_filter")
	resourceFilter := req.QueryParameter("resources_filter")
	nodeName := req.PathParameter("node")
	workspaceName := req.PathParameter("workspace")
	namespaceName := req.PathParameter("namespace")
	workloadKind := req.PathParameter("kind")
	workloadName := req.PathParameter("workload")
	podName := req.PathParameter("pod")
	containerName := req.PathParameter("container")
	pvcName := req.PathParameter("pvc")
	storageClassName := req.PathParameter("storageclass")
	componentType := req.PathParameter("component")

	var p params
	var err error

	// Parse the time window: either a [start, end] range or a single instant.
	if start != "" && end != "" {
		p.start, err = time.Parse(time.RFC3339, start)
		if err != nil {
			return p, err
		}
		p.end, err = time.Parse(time.RFC3339, end)
		if err != nil {
			return p, err
		}
		if step == "" {
			p.step = DefaultStep
		} else {
			p.step, err = time.ParseDuration(step)
			if err != nil {
				return p, err
			}
		}
	} else if start == "" && end == "" {
		if timestamp == "" {
			p.time = time.Now()
		} else {
			// Reuse the already-captured 'time' parameter instead of
			// re-reading it from the request.
			p.time, err = time.Parse(time.RFC3339, timestamp)
			if err != nil {
				return p, err
			}
		}
	} else {
		return p, errors.Errorf("'time' and the combination of 'start' and 'end' are mutually exclusive.")
	}

	// hide metrics from a deleted namespace having the same name
	namespace := req.QueryParameter("namespace")
	if namespace != "" {
		ns, err := h.k.Kubernetes().CoreV1().Namespaces().Get(namespace, corev1.GetOptions{})
		if err != nil {
			return p, err
		}
		cts := ns.CreationTimestamp.Time
		// Only range queries carry start/end; validating them on an instant
		// query (where both are zero) would reject every such request. For
		// instant queries, clamp the query time instead.
		if p.time.IsZero() {
			if p.start.Before(cts) {
				p.start = cts
			}
			if p.end.Before(cts) {
				return p, errors.Errorf("End timestamp must not be before namespace creation time.")
			}
		} else if p.time.Before(cts) {
			p.time = cts
		}
	}

	if resourceFilter == "" {
		resourceFilter = DefaultFilter
	}
	if metricFilter == "" {
		metricFilter = DefaultFilter
	}
	if componentType != "" {
		// Restrict component queries to metrics of that component.
		metricFilter = fmt.Sprintf("/^(?=.*%s)(?=.*%s)/s", componentType, metricFilter)
	}

	// Sorting is requested via 'sort_metric'; it implies pagination.
	if target != "" {
		// Record the sort target and order; previously these were never
		// stored on p, so shouldSort() could not return true.
		p.target = target
		p.page = DefaultPage
		p.limit = DefaultLimit
		p.order = order
		if order != model.OrderAscending {
			p.order = DefaultOrder
		}
		if page != "" {
			p.page, err = strconv.Atoi(page)
			if err != nil || p.page <= 0 {
				return p, errors.Errorf("Invalid parameter 'page'.")
			}
		}
		if limit != "" {
			p.limit, err = strconv.Atoi(limit)
			if err != nil || p.limit <= 0 {
				return p, errors.Errorf("Invalid parameter 'limit'.")
			}
		}
	}

	// Build the level-specific query option.
	switch lvl {
	case monitoring.LevelCluster:
		p.option = monitoring.ClusterOption{MetricFilter: metricFilter}
	case monitoring.LevelNode:
		p.identifier = model.IdentifierNode
		p.option = monitoring.NodeOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			NodeName:       nodeName,
		}
	case monitoring.LevelWorkspace:
		p.identifier = model.IdentifierWorkspace
		p.option = monitoring.WorkspaceOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			WorkspaceName:  workspaceName,
		}
	case monitoring.LevelNamespace:
		p.identifier = model.IdentifierNamespace
		p.option = monitoring.NamespaceOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			WorkspaceName:  workspaceName,
			NamespaceName:  namespaceName,
		}
	case monitoring.LevelWorkload:
		p.identifier = model.IdentifierWorkload
		p.option = monitoring.WorkloadOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			NamespaceName:  namespaceName,
			WorkloadKind:   workloadKind,
			WorkloadName:   workloadName,
		}
	case monitoring.LevelPod:
		p.identifier = model.IdentifierPod
		p.option = monitoring.PodOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			NodeName:       nodeName,
			NamespaceName:  namespaceName,
			WorkloadKind:   workloadKind,
			WorkloadName:   workloadName,
			PodName:        podName,
		}
	case monitoring.LevelContainer:
		p.identifier = model.IdentifierContainer
		p.option = monitoring.ContainerOption{
			MetricFilter:   metricFilter,
			ResourceFilter: resourceFilter,
			NamespaceName:  namespaceName,
			PodName:        podName,
			ContainerName:  containerName,
		}
	case monitoring.LevelPVC:
		p.identifier = model.IdentifierPVC
		p.option = monitoring.PVCOption{
			MetricFilter:              metricFilter,
			ResourceFilter:            resourceFilter,
			NamespaceName:             namespaceName,
			StorageClassName:          storageClassName,
			PersistentVolumeClaimName: pvcName,
		}
	case monitoring.LevelComponent:
		p.option = monitoring.ComponentOption{
			MetricFilter: metricFilter,
		}
	}
	return p, nil
}

View File

@@ -0,0 +1,194 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"github.com/emicklei/go-restful"
"k8s.io/client-go/kubernetes"
"kubesphere.io/kubesphere/pkg/api"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"regexp"
)
// handler serves the v1alpha3 monitoring endpoints.
type handler struct {
	k  kubernetes.Interface     // used to read namespace metadata during validation
	mo model.MonitoringOperator // executes metric queries against the backend
}

// newHandler wraps the given clients into a request handler.
func newHandler(k kubernetes.Interface, m monitoring.Interface) *handler {
	return &handler{k, model.NewMonitoringOperator(m)}
}
// handleClusterMetricsQuery answers cluster-level metric queries.
func (h handler) handleClusterMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelCluster)
	if err != nil {
		api.HandleBadRequest(resp, nil, err)
		return
	}
	h.handleNamedMetricsQuery(resp, opt)
}

// handleNodeMetricsQuery answers node-level metric queries.
func (h handler) handleNodeMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelNode)
	if err != nil {
		api.HandleBadRequest(resp, nil, err)
		return
	}
	h.handleNamedMetricsQuery(resp, opt)
}

// handleWorkspaceMetricsQuery answers workspace-level metric queries.
func (h handler) handleWorkspaceMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelWorkspace)
	if err != nil {
		api.HandleBadRequest(resp, nil, err)
		return
	}
	h.handleNamedMetricsQuery(resp, opt)
}

// handleNamespaceMetricsQuery answers namespace-level metric queries. A
// query window entirely before the namespace creation time yields an empty
// result set rather than an error.
func (h handler) handleNamespaceMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelNamespace)
	switch {
	case err == nil:
		h.handleNamedMetricsQuery(resp, opt)
	case err.Error() == ErrNoHit:
		resp.WriteAsJson(handleNoHit(opt.namedMetrics))
	default:
		api.HandleBadRequest(resp, nil, err)
	}
}

// handleWorkloadMetricsQuery answers workload-level metric queries.
func (h handler) handleWorkloadMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelWorkload)
	switch {
	case err == nil:
		h.handleNamedMetricsQuery(resp, opt)
	case err.Error() == ErrNoHit:
		resp.WriteAsJson(handleNoHit(opt.namedMetrics))
	default:
		api.HandleBadRequest(resp, nil, err)
	}
}

// handlePodMetricsQuery answers pod-level metric queries.
func (h handler) handlePodMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelPod)
	switch {
	case err == nil:
		h.handleNamedMetricsQuery(resp, opt)
	case err.Error() == ErrNoHit:
		resp.WriteAsJson(handleNoHit(opt.namedMetrics))
	default:
		api.HandleBadRequest(resp, nil, err)
	}
}

// handleContainerMetricsQuery answers container-level metric queries.
func (h handler) handleContainerMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelContainer)
	switch {
	case err == nil:
		h.handleNamedMetricsQuery(resp, opt)
	case err.Error() == ErrNoHit:
		resp.WriteAsJson(handleNoHit(opt.namedMetrics))
	default:
		api.HandleBadRequest(resp, nil, err)
	}
}

// handlePVCMetricsQuery answers persistent-volume-claim-level metric queries.
func (h handler) handlePVCMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelPVC)
	switch {
	case err == nil:
		h.handleNamedMetricsQuery(resp, opt)
	case err.Error() == ErrNoHit:
		resp.WriteAsJson(handleNoHit(opt.namedMetrics))
	default:
		api.HandleBadRequest(resp, nil, err)
	}
}

// handleComponentMetricsQuery answers control-plane component metric queries.
func (h handler) handleComponentMetricsQuery(req *restful.Request, resp *restful.Response) {
	opt, err := h.makeQueryOptions(parseRequestParams(req), monitoring.LevelComponent)
	if err != nil {
		api.HandleBadRequest(resp, nil, err)
		return
	}
	h.handleNamedMetricsQuery(resp, opt)
}
// handleNoHit builds a result set with one empty entry per named metric,
// used when a query is known in advance to have no hits.
func handleNoHit(namedMetrics []string) model.Metrics {
	var res model.Metrics
	for _, name := range namedMetrics {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: name,
			MetricData: monitoring.MetricData{},
		})
	}
	return res
}
// handleNamedMetricsQuery filters q.namedMetrics with the user-supplied
// metric filter, runs either a range or an instant query, and writes the
// result as JSON. Instant-query results may additionally be sorted and
// paginated when a sort target was requested.
func (h handler) handleNamedMetricsQuery(resp *restful.Response, q queryOptions) {
	var res model.Metrics

	// Compile the filter once instead of once per metric name. An invalid
	// pattern matches nothing, preserving the previous behavior where
	// MatchString errors were ignored and every metric was skipped.
	re, err := regexp.Compile(q.metricFilter)
	if err != nil {
		resp.WriteAsJson(res)
		return
	}

	var metrics []string
	for _, metric := range q.namedMetrics {
		if re.MatchString(metric) {
			metrics = append(metrics, metric)
		}
	}
	if len(metrics) == 0 {
		// Nothing to query; return an empty response body.
		resp.WriteAsJson(res)
		return
	}

	if q.isRangeQuery() {
		res = h.mo.GetNamedMetricsOverTime(metrics, q.start, q.end, q.step, q.option)
	} else {
		res = h.mo.GetNamedMetrics(metrics, q.time, q.option)
		if q.shouldSort() {
			res = *res.Sort(q.target, q.order, q.identifier).Page(q.page, q.limit)
		}
	}
	resp.WriteAsJson(res)
}

View File

@@ -0,0 +1,268 @@
package v1alpha3
import (
"github.com/emicklei/go-restful"
"github.com/pkg/errors"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"strconv"
"time"
)
const (
	DefaultStep   = 10 * time.Minute // range-query resolution when 'step' is omitted
	DefaultFilter = ".*"             // match-all metric/resource filter
	DefaultOrder  = model.OrderDescending
	DefaultPage   = 1
	DefaultLimit  = 5

	// Component types recognized at the component monitoring level.
	ComponentEtcd      = "etcd"
	ComponentAPIServer = "apiserver"
	ComponentScheduler = "scheduler"

	// Error messages. ErrNoHit is matched by string comparison in the
	// handlers, which then return an empty result set instead of a 400.
	ErrNoHit           = "'end' must be after the namespace creation time."
	ErrParamConflict   = "'time' and the combination of 'start' and 'end' are mutually exclusive."
	ErrInvalidStartEnd = "'start' must be before 'end'."
	ErrInvalidPage     = "Invalid parameter 'page'."
	ErrInvalidLimit    = "Invalid parameter 'limit'."
)
// reqParams holds the raw, unvalidated query and path parameters of a
// monitoring request, exactly as they appeared on the wire.
type reqParams struct {
	time             string
	start            string
	end              string
	step             string
	target           string
	order            string
	page             string
	limit            string
	metricFilter     string
	resourceFilter   string
	nodeName         string
	workspaceName    string
	namespaceName    string
	workloadKind     string
	workloadName     string
	podName          string
	containerName    string
	pvcName          string
	storageClassName string
	componentType    string
}

// queryOptions is the validated, typed form of reqParams, ready to execute.
type queryOptions struct {
	metricFilter string
	namedMetrics []string // candidate metric names for the requested level

	start time.Time // range-query window (zero for instant queries)
	end   time.Time
	time  time.Time // instant-query timestamp (zero when start/end are used)
	step  time.Duration

	target     string // metric to sort by; empty disables sorting
	identifier string // label identifying resources when sorting
	order      string
	page       int
	limit      int

	option monitoring.QueryOption // level-specific query option
}
// isRangeQuery reports whether q describes a range query over [start, end].
// makeQueryOptions always sets q.time for instant queries (defaulting to
// time.Now()) and leaves it zero for range queries, so a zero time means
// range. The previous implementation returned !q.time.IsZero(), which
// inverted the check and made GetNamedMetricsOverTime run with a zero
// start/end window on every instant query.
func (q queryOptions) isRangeQuery() bool {
	return q.time.IsZero()
}
// shouldSort reports whether sorting is both requested (a sort target was
// given) and possible (the level defines an identifier to sort on).
func (q queryOptions) shouldSort() bool {
	if q.target == "" {
		return false
	}
	return q.identifier != ""
}
// parseRequestParams copies the raw query and path parameters of req into a
// reqParams value; no validation or type conversion happens here.
func parseRequestParams(req *restful.Request) reqParams {
	return reqParams{
		time:             req.QueryParameter("time"),
		start:            req.QueryParameter("start"),
		end:              req.QueryParameter("end"),
		step:             req.QueryParameter("step"),
		target:           req.QueryParameter("sort_metric"),
		order:            req.QueryParameter("sort_type"),
		page:             req.QueryParameter("page"),
		limit:            req.QueryParameter("limit"),
		metricFilter:     req.QueryParameter("metrics_filter"),
		resourceFilter:   req.QueryParameter("resources_filter"),
		nodeName:         req.PathParameter("node"),
		workspaceName:    req.PathParameter("workspace"),
		namespaceName:    req.PathParameter("namespace"),
		workloadKind:     req.PathParameter("kind"),
		workloadName:     req.PathParameter("workload"),
		podName:          req.PathParameter("pod"),
		containerName:    req.PathParameter("container"),
		pvcName:          req.PathParameter("pvc"),
		storageClassName: req.PathParameter("storageclass"),
		componentType:    req.PathParameter("component"),
	}
}
// makeQueryOptions turns raw request parameters into validated query options
// for the given monitoring level: it selects the named metrics and the
// level-specific option, parses the time window, clamps namespace queries to
// the namespace creation time, and resolves sorting/paging parameters.
func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOptions, err error) {
	if r.resourceFilter == "" {
		r.resourceFilter = DefaultFilter
	}
	q.metricFilter = r.metricFilter
	if r.metricFilter == "" {
		q.metricFilter = DefaultFilter
	}

	// Pick the candidate metrics and the level-specific query option.
	switch lvl {
	case monitoring.LevelCluster:
		q.option = monitoring.ClusterOption{}
		q.namedMetrics = model.ClusterMetrics
	case monitoring.LevelNode:
		q.identifier = model.IdentifierNode
		q.namedMetrics = model.NodeMetrics
		q.option = monitoring.NodeOption{
			ResourceFilter: r.resourceFilter,
			NodeName:       r.nodeName,
		}
	case monitoring.LevelWorkspace:
		q.identifier = model.IdentifierWorkspace
		q.namedMetrics = model.WorkspaceMetrics
		q.option = monitoring.WorkspaceOption{
			ResourceFilter: r.resourceFilter,
			WorkspaceName:  r.workspaceName,
		}
	case monitoring.LevelNamespace:
		q.identifier = model.IdentifierNamespace
		q.namedMetrics = model.NamespaceMetrics
		q.option = monitoring.NamespaceOption{
			ResourceFilter: r.resourceFilter,
			WorkspaceName:  r.workspaceName,
			NamespaceName:  r.namespaceName,
		}
	case monitoring.LevelWorkload:
		q.identifier = model.IdentifierWorkload
		q.namedMetrics = model.WorkloadMetrics
		q.option = monitoring.WorkloadOption{
			ResourceFilter: r.resourceFilter,
			NamespaceName:  r.namespaceName,
			WorkloadKind:   r.workloadKind,
		}
	case monitoring.LevelPod:
		q.identifier = model.IdentifierPod
		q.namedMetrics = model.PodMetrics
		q.option = monitoring.PodOption{
			ResourceFilter: r.resourceFilter,
			NodeName:       r.nodeName,
			NamespaceName:  r.namespaceName,
			WorkloadKind:   r.workloadKind,
			WorkloadName:   r.workloadName,
			PodName:        r.podName,
		}
	case monitoring.LevelContainer:
		q.identifier = model.IdentifierContainer
		q.namedMetrics = model.ContainerMetrics
		q.option = monitoring.ContainerOption{
			ResourceFilter: r.resourceFilter,
			NamespaceName:  r.namespaceName,
			PodName:        r.podName,
			ContainerName:  r.containerName,
		}
	case monitoring.LevelPVC:
		q.identifier = model.IdentifierPVC
		q.namedMetrics = model.PVCMetrics
		q.option = monitoring.PVCOption{
			ResourceFilter:            r.resourceFilter,
			NamespaceName:             r.namespaceName,
			StorageClassName:          r.storageClassName,
			PersistentVolumeClaimName: r.pvcName,
		}
	case monitoring.LevelComponent:
		q.option = monitoring.ComponentOption{}
		switch r.componentType {
		case ComponentEtcd:
			q.namedMetrics = model.EtcdMetrics
		case ComponentAPIServer:
			q.namedMetrics = model.APIServerMetrics
		case ComponentScheduler:
			q.namedMetrics = model.SchedulerMetrics
		}
	}

	// Parse time params: 'start'+'end' (range) and 'time' (instant) are
	// mutually exclusive; with neither given, default to an instant query now.
	if r.start != "" && r.end != "" {
		startInt, err := strconv.ParseInt(r.start, 10, 64)
		if err != nil {
			return q, err
		}
		q.start = time.Unix(startInt, 0)
		endInt, err := strconv.ParseInt(r.end, 10, 64)
		if err != nil {
			return q, err
		}
		q.end = time.Unix(endInt, 0)
		if r.step == "" {
			q.step = DefaultStep
		} else {
			q.step, err = time.ParseDuration(r.step)
			if err != nil {
				return q, err
			}
		}
		if q.start.After(q.end) {
			return q, errors.New(ErrInvalidStartEnd)
		}
	} else if r.start == "" && r.end == "" {
		if r.time == "" {
			q.time = time.Now()
		} else {
			timeInt, err := strconv.ParseInt(r.time, 10, 64)
			if err != nil {
				return q, err
			}
			q.time = time.Unix(timeInt, 0)
		}
	} else {
		// No format verbs, so errors.New is the right call here.
		return q, errors.New(ErrParamConflict)
	}

	// Ensure the query window starts no earlier than the namespace creation
	// time, hiding metrics left over from a deleted namespace of the same name.
	if r.namespaceName != "" {
		ns, err := h.k.CoreV1().Namespaces().Get(r.namespaceName, corev1.GetOptions{})
		if err != nil {
			return q, err
		}
		cts := ns.CreationTimestamp.Time
		// Only range queries carry start/end; checking them on an instant
		// query (where both are zero) would return ErrNoHit for every such
		// request. For instant queries, clamp the query time instead.
		if q.time.IsZero() {
			if q.start.Before(cts) {
				q.start = cts
			}
			if q.end.Before(cts) {
				return q, errors.New(ErrNoHit)
			}
		} else if q.time.Before(cts) {
			q.time = cts
		}
	}

	// Parse sorting and paging params. Sorting is requested via 'sort_metric'.
	if r.target != "" {
		// Record target/order on q; previously they were dropped (the code
		// tested q.order and wrote back to r.order), so shouldSort() could
		// never return true and results were never sorted or paginated.
		q.target = r.target
		q.page = DefaultPage
		q.limit = DefaultLimit
		q.order = r.order
		if r.order != model.OrderAscending {
			q.order = DefaultOrder
		}
		if r.page != "" {
			q.page, err = strconv.Atoi(r.page)
			if err != nil || q.page <= 0 {
				return q, errors.New(ErrInvalidPage)
			}
		}
		if r.limit != "" {
			q.limit, err = strconv.Atoi(r.limit)
			if err != nil || q.limit <= 0 {
				return q, errors.New(ErrInvalidLimit)
			}
		}
	}
	return q, nil
}

View File

@@ -0,0 +1,148 @@
package v1alpha3
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"reflect"
"testing"
"time"
)
// TestMakeQueryOptions exercises handler.makeQueryOptions: time-parameter
// validation, namespace creation-time clamping, and per-level option
// construction. (Renamed from TestParseRequestParams for consistency — the
// function under test is makeQueryOptions, not parseRequestParams.)
func TestMakeQueryOptions(t *testing.T) {
	tests := []struct {
		params      reqParams
		lvl         monitoring.Level
		namespace   corev1.Namespace
		expected    queryOptions
		expectedErr bool
	}{
		// A malformed 'time' value must be rejected.
		{
			params: reqParams{
				time: "abcdef",
			},
			lvl:         monitoring.LevelCluster,
			expectedErr: true,
		},
		// A plain instant cluster query at a given Unix timestamp.
		{
			params: reqParams{
				time: "1585831995",
			},
			lvl: monitoring.LevelCluster,
			expected: queryOptions{
				time:         time.Unix(1585831995, 0),
				metricFilter: ".*",
				namedMetrics: model.ClusterMetrics,
				option:       monitoring.ClusterOption{},
			},
			expectedErr: false,
		},
		// Range query: 'start' is clamped up to the namespace creation time.
		{
			params: reqParams{
				start:         "1585830000",
				end:           "1585839999",
				step:          "1m",
				namespaceName: "default",
			},
			lvl: monitoring.LevelNamespace,
			namespace: corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "default",
					CreationTimestamp: metav1.Time{
						Time: time.Unix(1585836666, 0),
					},
				},
			},
			expected: queryOptions{
				start:        time.Unix(1585836666, 0),
				end:          time.Unix(1585839999, 0),
				step:         time.Minute,
				identifier:   model.IdentifierNamespace,
				metricFilter: ".*",
				namedMetrics: model.NamespaceMetrics,
				option: monitoring.NamespaceOption{
					ResourceFilter: ".*",
					NamespaceName:  "default",
				},
			},
			expectedErr: false,
		},
		// Range query ending before the namespace creation time: no hit.
		{
			params: reqParams{
				start:         "1585830000",
				end:           "1585839999",
				step:          "1m",
				namespaceName: "default",
			},
			lvl: monitoring.LevelNamespace,
			namespace: corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "default",
					CreationTimestamp: metav1.Time{
						Time: time.Unix(1589999999, 0),
					},
				},
			},
			expectedErr: true,
		},
		// Unknown namespace: the lookup error must be propagated.
		{
			params: reqParams{
				start:         "1585830000",
				end:           "1585839999",
				step:          "1m",
				namespaceName: "non-exist",
			},
			lvl: monitoring.LevelNamespace,
			namespace: corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "default",
					CreationTimestamp: metav1.Time{
						Time: time.Unix(1589999999, 0),
					},
				},
			},
			expectedErr: true,
		},
		// Component-level query selects the etcd metric family.
		{
			params: reqParams{
				time:          "1585830000",
				componentType: "etcd",
				metricFilter:  "etcd_server_list",
			},
			lvl: monitoring.LevelComponent,
			expected: queryOptions{
				time:         time.Unix(1585830000, 0),
				metricFilter: "etcd_server_list",
				namedMetrics: model.EtcdMetrics,
				option:       monitoring.ComponentOption{},
			},
			expectedErr: false,
		},
	}
	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			// Back the handler with a fake clientset seeded with the test namespace.
			client := fake.NewSimpleClientset(&tt.namespace)
			handler := newHandler(client, nil)
			result, err := handler.makeQueryOptions(tt.params, tt.lvl)
			if err != nil {
				if !tt.expectedErr {
					t.Fatalf("unexpected err: %s.", err.Error())
				}
				return
			}
			if tt.expectedErr {
				t.Fatalf("failed to catch error.")
			}
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected return: %v.", result)
			}
		})
	}
}

View File

@@ -15,16 +15,16 @@
limitations under the License.
*/
package v1alpha2
package v1alpha3
import (
"github.com/emicklei/go-restful"
"github.com/emicklei/go-restful-openapi"
"k8s.io/apimachinery/pkg/runtime/schema"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"k8s.io/client-go/kubernetes"
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
model "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
)
@@ -36,7 +36,7 @@ const (
var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"}
func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient monitoring.Interface) error {
func AddToContainer(c *restful.Container, k8sClient kubernetes.Interface, monitoringClient monitoring.Interface) error {
ws := runtime.NewWebService(GroupVersion)
h := newHandler(k8sClient, monitoringClient)
@@ -50,8 +50,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.ClusterMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes").
@@ -68,8 +68,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.NodeMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{node}").
@@ -82,8 +82,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.NodeMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/workspaces").
@@ -100,8 +100,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/workspaces/{workspace}").
@@ -114,8 +114,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/workspaces/{workspace}/namespaces").
@@ -133,8 +133,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces").
@@ -151,8 +151,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}").
@@ -165,8 +165,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/workloads").
@@ -184,8 +184,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkloadMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/workloads/{kind}").
@@ -204,8 +204,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkloadMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods").
@@ -223,8 +223,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}").
@@ -238,8 +238,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/workloads/{kind}/{workload}/pods").
@@ -259,8 +259,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{node}/pods").
@@ -278,8 +278,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{node}/pods/{pod}").
@@ -293,8 +293,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers").
@@ -313,8 +313,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.ContainerMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers/{container}").
@@ -329,8 +329,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.ContainerMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/storageclasses/{storageclass}/persistentvolumeclaims").
@@ -348,8 +348,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/persistentvolumeclaims").
@@ -367,8 +367,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)).
Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/persistentvolumeclaims/{pvc}").
@@ -382,8 +382,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/components/{component}").
@@ -396,8 +396,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient
Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)).
Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.ComponentMetricsTag}).
Writes(v1alpha2.APIResponse{}).
Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})).
Writes(model.Metrics{}).
Returns(http.StatusOK, RespOK, model.Metrics{})).
Produces(restful.MIME_JSON)
c.Add(ws)

View File

@@ -19,19 +19,15 @@
package monitoring
import (
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"time"
)
type MonitoringOperator interface {
GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error)
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error)
GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error)
GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error)
SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int)
PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse
GetMetrics(stmts []string, time time.Time) Metrics
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics
GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics
GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics
}
type monitoringOperator struct {
@@ -43,27 +39,21 @@ func NewMonitoringOperator(client monitoring.Interface) MonitoringOperator {
}
// TODO(huanggze): reserve for custom monitoring
func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error) {
func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) Metrics {
panic("implement me")
}
// TODO(huanggze): reserve for custom monitoring
func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error) {
func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics {
panic("implement me")
}
func (mo monitoringOperator) GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) {
metrics, err := mo.c.GetNamedMetrics(time, opt)
if err != nil {
klog.Error(err)
}
return v1alpha2.APIResponse{Results: metrics}, err
func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetrics(metrics, time, opt)
return Metrics{Results: ress}
}
func (mo monitoringOperator) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) {
metrics, err := mo.c.GetNamedMetricsOverTime(start, end, step, opt)
if err != nil {
klog.Error(err)
}
return v1alpha2.APIResponse{Results: metrics}, err
func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetricsOverTime(metrics, start, end, step, opt)
return Metrics{Results: ress}
}

View File

@@ -1,19 +1,5 @@
package monitoring
type MonitoringLevel int
const (
LevelCluster = MonitoringLevel(1) << iota
LevelNode
LevelWorkspace
LevelNamespace
LevelWorkload
LevelPod
LevelContainer
LevelPVC
LevelComponent
)
var ClusterMetrics = []string{
"cluster_cpu_utilisation",
"cluster_cpu_usage",
@@ -161,7 +147,6 @@ var WorkloadMetrics = []string{
"workload_memory_usage_wo_cache",
"workload_net_bytes_transmitted",
"workload_net_bytes_received",
"workload_deployment_replica",
"workload_deployment_replica_available",
"workload_statefulset_replica",
@@ -198,7 +183,7 @@ var PVCMetrics = []string{
"pvc_bytes_utilisation",
}
var ComponentMetrics = []string{
var EtcdMetrics = []string{
"etcd_server_list",
"etcd_server_total",
"etcd_server_up_total",
@@ -219,34 +204,20 @@ var ComponentMetrics = []string{
"etcd_disk_wal_fsync_duration_quantile",
"etcd_disk_backend_commit_duration",
"etcd_disk_backend_commit_duration_quantile",
}
var APIServerMetrics = []string{
"apiserver_up_sum",
"apiserver_request_rate",
"apiserver_request_by_verb_rate",
"apiserver_request_latencies",
"apiserver_request_by_verb_latencies",
}
var SchedulerMetrics = []string{
"scheduler_up_sum",
"scheduler_schedule_attempts",
"scheduler_schedule_attempt_rate",
"scheduler_e2e_scheduling_latency",
"scheduler_e2e_scheduling_latency_quantile",
"controller_manager_up_sum",
"coredns_up_sum",
"coredns_cache_hits",
"coredns_cache_misses",
"coredns_dns_request_rate",
"coredns_dns_request_duration",
"coredns_dns_request_duration_quantile",
"coredns_dns_request_by_type_rate",
"coredns_dns_request_by_rcode_rate",
"coredns_panic_rate",
"coredns_proxy_request_rate",
"coredns_proxy_request_duration",
"coredns_proxy_request_duration_quantile",
"prometheus_up_sum",
"prometheus_tsdb_head_samples_appended_rate",
}

View File

@@ -19,7 +19,6 @@
package monitoring
import (
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"math"
"sort"
@@ -41,7 +40,7 @@ const (
type wrapper struct {
monitoring.MetricData
by func(p, q *monitoring.MetricValue) bool
identifier, order string
}
func (w wrapper) Len() int {
@@ -49,156 +48,142 @@ func (w wrapper) Len() int {
}
func (w wrapper) Less(i, j int) bool {
return w.by(&w.MetricValues[i], &w.MetricValues[j])
p := w.MetricValues[i]
q := w.MetricValues[j]
if p.Sample.Value() == q.Sample.Value() {
return p.Metadata[w.identifier] < q.Metadata[w.identifier]
}
switch w.order {
case OrderAscending:
return p.Sample.Value() < q.Sample.Value()
default:
return p.Sample.Value() > q.Sample.Value()
}
}
func (w wrapper) Swap(i, j int) {
w.MetricValues[i], w.MetricValues[j] = w.MetricValues[j], w.MetricValues[i]
func (id wrapper) Swap(i, j int) {
id.MetricValues[i], id.MetricValues[j] = id.MetricValues[j], id.MetricValues[i]
}
// The sortMetrics sorts a group of resources by a given metric
// SortMetrics sorts a group of resources by a given metric. Range query doesn't support ranking.
// Example:
//
// before sorting
// |------| Metric 1 | Metric 2 | Metric 3 |
// | ID a | 1 | XL | |
// | ID b | 1 | S | |
// | ID c | 3 | M | |
// Before sorting:
// | ID | Metric 1 | Metric 2 | Metric 3 |
// | a | 1 | XL | |
// | b | 1 | S | |
// | c | 3 | M | |
//
// sort by metrics_2
// |------| Metric 1 | Metric 2 (asc) | Metric 3 |
// | ID a | 1 | XL | |
// | ID c | 3 | M | |
// | ID b | 1 | S | |
//
// ranking can only be applied to instant query results, not range query
func (mo monitoringOperator) SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int) {
if target == "" || len(raw.Results) == 0 {
return raw, -1
}
if order == "" {
order = OrderDescending
}
var currentResourceMap = make(map[string]int)
// resource-ordinal map
var indexMap = make(map[string]int)
i := 0
for _, item := range raw.Results {
if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess {
if item.MetricName == target {
if order == OrderAscending {
sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool {
if p.Sample[1] == q.Sample[1] {
return p.Metadata[identifier] < q.Metadata[identifier]
}
return p.Sample[1] < q.Sample[1]
}})
} else {
sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool {
if p.Sample[1] == q.Sample[1] {
return p.Metadata[identifier] > q.Metadata[identifier]
}
return p.Sample[1] > q.Sample[1]
}})
}
for _, r := range item.MetricValues {
// record the ordinal of resource to indexMap
resourceName, exist := r.Metadata[identifier]
if exist {
if _, exist := indexMap[resourceName]; !exist {
indexMap[resourceName] = i
i = i + 1
}
}
}
}
// get total number of rows
for _, r := range item.MetricValues {
k, ok := r.Metadata[identifier]
if ok {
currentResourceMap[k] = 1
}
}
}
}
var keys []string
for k := range currentResourceMap {
keys = append(keys, k)
}
sort.Strings(keys)
for _, resource := range keys {
if _, exist := indexMap[resource]; !exist {
indexMap[resource] = i
i = i + 1
}
}
// sort other metrics
for i := 0; i < len(raw.Results); i++ {
item := raw.Results[i]
if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess {
sortedMetric := make([]monitoring.MetricValue, len(indexMap))
for j := 0; j < len(item.MetricValues); j++ {
r := item.MetricValues[j]
k, exist := r.Metadata[identifier]
if exist {
index, exist := indexMap[k]
if exist {
sortedMetric[index] = r
}
}
}
raw.Results[i].MetricValues = sortedMetric
}
}
return raw, len(indexMap)
}
func (mo monitoringOperator) PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse {
if page <= 0 || limit <= 0 || rows <= 0 || len(raw.Results) == 0 {
// After sorting: target=metric_2, order=asc, identifier=id
// | ID | Metric 1 | Metric 2 (asc) | Metric 3 |
// | a | 1 | XL | |
// | c | 3 | M | |
// | b | 1 | S | |
func (raw *Metrics) Sort(target, order, identifier string) *Metrics {
if target == "" || identifier == "" || len(raw.Results) == 0 {
return raw
}
// matrix type can not be sorted
resourceSet := make(map[string]bool) // resource set records possible values of the identifier
resourceOrdinal := make(map[string]int) // resource-ordinal map
ordinal := 0
for _, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector {
return raw
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
if item.MetricName == target {
sort.Sort(wrapper{
MetricData: item.MetricData,
identifier: identifier,
order: order,
})
for _, mv := range item.MetricValues {
// Record ordinals in the final result
v, ok := mv.Metadata[identifier]
if ok && v != "" {
resourceOrdinal[v] = ordinal
ordinal++
}
}
}
// Add every unique identifier value to the set
for _, mv := range item.MetricValues {
v, ok := mv.Metadata[identifier]
if ok && v != "" {
resourceSet[v] = true
}
}
}
// the i page: [(page-1) * limit, (page) * limit - 1]
start := (page - 1) * limit
end := (page)*limit - 1
var resourceList []string
for k := range resourceSet {
resourceList = append(resourceList, k)
}
sort.Strings(resourceList)
for i := 0; i < len(raw.Results); i++ {
if raw.Results[i].MetricType != monitoring.MetricTypeVector || raw.Results[i].Status != monitoring.StatusSuccess {
// Fill resource-ordinal map with resources never present in the target, and give them ordinals.
for _, r := range resourceList {
if _, ok := resourceOrdinal[r]; !ok {
resourceOrdinal[r] = ordinal
ordinal++
}
}
// Sort metrics
for i, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
resultLen := len(raw.Results[i].MetricValues)
if start >= resultLen {
sorted := make([]monitoring.MetricValue, len(resourceList))
for _, mv := range item.MetricValues {
v, ok := mv.Metadata[identifier]
if ok && v != "" {
ordinal, _ := resourceOrdinal[v]
sorted[ordinal] = mv
}
}
raw.Results[i].MetricValues = sorted
}
raw.CurrentPage = 1
raw.TotalPages = 1
raw.TotalItems = len(resourceList)
return raw
}
func (raw *Metrics) Page(page, limit int) *Metrics {
if page < 1 || limit < 1 || len(raw.Results) == 0 {
return raw
}
start := (page - 1) * limit
end := page * limit
for i, item := range raw.Results {
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
continue
}
total := len(item.MetricValues)
if start >= total {
raw.Results[i].MetricValues = nil
continue
}
if end >= resultLen {
end = resultLen - 1
if end >= total {
end = total
}
slice := raw.Results[i].MetricValues[start : end+1]
raw.Results[i].MetricValues = slice
raw.Results[i].MetricValues = item.MetricValues[start:end]
}
raw.CurrentPage = page
raw.TotalPage = int(math.Ceil(float64(rows) / float64(limit)))
raw.TotalItem = rows
raw.TotalPages = int(math.Ceil(float64(raw.TotalItems) / float64(limit)))
return raw
}

View File

@@ -0,0 +1,91 @@
package monitoring
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/json-iterator/go"
"io/ioutil"
"testing"
)
func TestSort(t *testing.T) {
tests := []struct {
name string
target string
order string
identifier string
source string
expected string
}{
{"sort in ascending order", "node_cpu_utilisation", "asc", "node", "source-node-metrics.json", "sorted-node-metrics-asc.json"},
{"sort in descending order", "node_memory_utilisation", "desc", "node", "source-node-metrics.json", "sorted-node-metrics-desc.json"},
{"sort faulty metrics", "node_memory_utilisation", "desc", "node", "faulty-node-metrics.json", "faulty-node-metrics-sorted.json"},
{"sort metrics with an blank node", "node_memory_utilisation", "desc", "node", "blank-node-metrics.json", "blank-node-metrics-sorted.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
source, expected, err := jsonFromFile(tt.source, tt.expected)
if err != nil {
t.Fatal(err)
}
result := source.Sort(tt.target, tt.order, tt.identifier)
if diff := cmp.Diff(*result, *expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func TestPage(t *testing.T) {
tests := []struct {
name string
page int
limit int
source string
expected string
}{
{"page 0 limit 5", 0, 5, "sorted-node-metrics-asc.json", "sorted-node-metrics-asc.json"},
{"page 1 limit 5", 1, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-1.json"},
{"page 2 limit 5", 2, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-2.json"},
{"page 3 limit 5", 3, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-3.json"},
{"page faulty metrics", 1, 2, "faulty-node-metrics-sorted.json", "faulty-node-metrics-paged.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
source, expected, err := jsonFromFile(tt.source, tt.expected)
if err != nil {
t.Fatal(err)
}
result := source.Page(tt.page, tt.limit)
if diff := cmp.Diff(*result, *expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func jsonFromFile(sourceFile, expectedFile string) (*Metrics, *Metrics, error) {
sourceJson := &Metrics{}
expectedJson := &Metrics{}
json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", sourceFile))
if err != nil {
return nil, nil, err
}
err = jsoniter.Unmarshal(json, sourceJson)
if err != nil {
return nil, nil, err
}
json, err = ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
if err != nil {
return nil, nil, err
}
err = jsoniter.Unmarshal(json, expectedJson)
if err != nil {
return nil, nil, err
}
return sourceJson, expectedJson, nil
}

View File

@@ -0,0 +1,77 @@
{
"results":[
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":3
}

View File

@@ -0,0 +1,92 @@
{
"results": [
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": ""
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": ""
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
}
]
}
}
]
}

View File

@@ -0,0 +1,63 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"error":"error"
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
}
]
}
}
],
"page":1,
"total_page":2,
"total_item":4
}

View File

@@ -0,0 +1,99 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"error":"error"
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":4
}

View File

@@ -0,0 +1,96 @@
{
"results": [
{
"metric_name": "node_cpu_utilisation",
"error": "error"
},
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
}
]
}
}
]
}

View File

@@ -0,0 +1,166 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
}
]
}
}
],
"page":1,
"total_page":2,
"total_item":8
}

View File

@@ -0,0 +1,112 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
}
]
}
}
],
"page":2,
"total_page":2,
"total_item":8
}

View File

@@ -0,0 +1,25 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector"
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector"
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector"
}
}
],
"page":3,
"total_page":2,
"total_item":8
}

View File

@@ -0,0 +1,247 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":8
}

View File

@@ -0,0 +1,247 @@
{
"results":[
{
"metric_name":"node_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.07756249999996119
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.18095833333306172
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.06745833333334303
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.05066666666655995
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.07443750000044626
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.05210416666595847
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.03250000000007276
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.021645833333483702
]
}
]
}
},
{
"metric_name":"node_disk_size_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.193,
0.4329682466178235
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.193,
0.42012898861983516
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.193,
0.35981263055856705
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.193,
0.2588273152865106
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.193,
0.29849334024542695
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.193,
0.21351118996831508
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.193,
0.2601006025131434
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.193,
0.3335848564534758
]
}
]
}
},
{
"metric_name":"node_memory_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"node":"i-o13skypq"
},
"value":[
1585658599.195,
0.823247832787681
]
},
{
"metric":{
"node":"i-2dazc1d6"
},
"value":[
1585658599.195,
0.5286875837861773
]
},
{
"metric":{
"node":"i-xfcxdn7z"
},
"value":[
1585658599.195,
0.40309723127991315
]
},
{
"metric":{
"node":"i-hgcoippu"
},
"value":[
1585658599.195,
0.2497060264216553
]
},
{
"metric":{
"node":"i-ezjb7gsk"
},
"value":[
1585658599.195,
0.23637090535053928
]
},
{
"metric":{
"node":"i-ircdnrao"
},
"value":[
1585658599.195,
0.21291125105270192
]
},
{
"metric":{
"node":"i-9jtsi522"
},
"value":[
1585658599.195,
0.1446648505469157
]
},
{
"metric":{
"node":"i-tl1i71hr"
},
"value":[
1585658599.195,
0.12824588180084573
]
}
]
}
}
],
"page":1,
"total_page":1,
"total_item":8
}

View File

@@ -0,0 +1,244 @@
{
"results": [
{
"metric_name": "node_cpu_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.18095833333306172
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.03250000000007276
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.07443750000044626
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.05066666666655995
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.193,
0.05210416666595847
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.193,
0.07756249999996119
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.193,
0.021645833333483702
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.193,
0.06745833333334303
]
}
]
}
},
{
"metric_name": "node_disk_size_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.193,
0.42012898861983516
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.193,
0.2601006025131434
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.193,
0.29849334024542695
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.193,
0.2588273152865106
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.193,
0.21351118996831508
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.193,
0.4329682466178235
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.193,
0.3335848564534758
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.193,
0.35981263055856705
]
}
]
}
},
{
"metric_name": "node_memory_utilisation",
"data": {
"resultType": "vector",
"result": [
{
"metric": {
"node": "i-2dazc1d6"
},
"value": [
1585658599.195,
0.5286875837861773
]
},
{
"metric": {
"node": "i-9jtsi522"
},
"value": [
1585658599.195,
0.1446648505469157
]
},
{
"metric": {
"node": "i-ezjb7gsk"
},
"value": [
1585658599.195,
0.23637090535053928
]
},
{
"metric": {
"node": "i-hgcoippu"
},
"value": [
1585658599.195,
0.2497060264216553
]
},
{
"metric": {
"node": "i-ircdnrao"
},
"value": [
1585658599.195,
0.21291125105270192
]
},
{
"metric": {
"node": "i-o13skypq"
},
"value": [
1585658599.195,
0.823247832787681
]
},
{
"metric": {
"node": "i-tl1i71hr"
},
"value": [
1585658599.195,
0.12824588180084573
]
},
{
"metric": {
"node": "i-xfcxdn7z"
},
"value": [
1585658599.195,
0.40309723127991315
]
}
]
}
}
]
}

View File

@@ -1,10 +1,10 @@
package v1alpha2
package monitoring
import "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
type APIResponse struct {
type Metrics struct {
Results []monitoring.Metric `json:"results" description:"actual array of results"`
CurrentPage int `json:"page,omitempty" description:"current page returned"`
TotalPage int `json:"total_page,omitempty" description:"total number of pages"`
TotalItem int `json:"total_item,omitempty" description:"page size"`
TotalPages int `json:"total_page,omitempty" description:"total number of pages"`
TotalItems int `json:"total_item,omitempty" description:"page size"`
}

View File

@@ -170,7 +170,9 @@ func (d *FakeDevops) GetCredentialInProject(projectId, id string, content bool)
func (d *FakeDevops) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) {
return nil, nil
}
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) { return nil, nil }
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) {
return nil, nil
}
// BuildGetter
func (d *FakeDevops) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) {

View File

@@ -2,40 +2,9 @@ package monitoring
import "time"
const (
StatusSuccess = "success"
StatusError = "error"
MetricTypeMatrix = "matrix"
MetricTypeVector = "vector"
)
type Metric struct {
MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
Status string `json:"status" description:"result status, one of error, success"`
MetricData `json:"data" description:"actual metric result"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}
type MetricData struct {
MetricType string `json:"resultType" description:"result type, one of matrix, vector"`
MetricValues []MetricValue `json:"result" description:"metric data including labels, time series and values"`
}
type Point [2]float64
type MetricValue struct {
Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
Sample Point `json:"value,omitempty" description:"time series, values of vector type"`
Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
}
type Interface interface {
// The `stmts` defines statements, expressions or rules (eg. promql in Prometheus) for querying specific metrics.
GetMetrics(stmts []string, time time.Time) ([]Metric, error)
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]Metric, error)
// Get named metrics (eg. node_cpu_usage)
GetNamedMetrics(time time.Time, opt QueryOption) ([]Metric, error)
GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt QueryOption) ([]Metric, error)
GetMetrics(exprs []string, time time.Time) []Metric
GetMetricsOverTime(exprs []string, start, end time.Time, step time.Duration) []Metric
GetNamedMetrics(metrics []string, time time.Time, opt QueryOption) []Metric
GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt QueryOption) []Metric
}

View File

@@ -1,178 +1,153 @@
package prometheus
import (
"fmt"
"github.com/json-iterator/go"
"io/ioutil"
"context"
"github.com/prometheus/client_golang/api"
apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
"net/url"
"regexp"
"sync"
"time"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// prometheus implements monitoring interface backed by Prometheus
type prometheus struct {
options *Options
client *http.Client
client apiv1.API
}
func NewPrometheus(options *Options) monitoring.Interface {
return &prometheus{
options: options,
client: &http.Client{Timeout: 10 * time.Second},
func NewPrometheus(options *Options) (monitoring.Interface, error) {
cfg := api.Config{
Address: options.Endpoint,
}
client, err := api.NewClient(cfg)
return prometheus{client: apiv1.NewAPI(client)}, err
}
// TODO(huanggze): reserve for custom monitoring
func (p *prometheus) GetMetrics(stmts []string, time time.Time) ([]monitoring.Metric, error) {
func (p prometheus) GetMetrics(stmts []string, time time.Time) []monitoring.Metric {
panic("implement me")
}
// TODO(huanggze): reserve for custom monitoring
func (p *prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]monitoring.Metric, error) {
func (p prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) []monitoring.Metric {
panic("implement me")
}
func (p *prometheus) GetNamedMetrics(ts time.Time, o monitoring.QueryOption) ([]monitoring.Metric, error) {
metrics := make([]monitoring.Metric, 0)
var mtx sync.Mutex // guard metrics
func (p prometheus) GetNamedMetrics(metrics []string, ts time.Time, o monitoring.QueryOption) []monitoring.Metric {
var res []monitoring.Metric
var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
errCh := make(chan error)
for _, metric := range opts.NamedMetrics {
matched, _ := regexp.MatchString(opts.MetricFilter, metric)
if matched {
exp := makeExpression(metric, *opts)
wg.Add(1)
go func(metric, exp string) {
res, err := p.query(exp, ts)
if err != nil {
select {
case errCh <- err: // Record error once
default:
}
} else {
res.MetricName = metric // Add metric name
mtx.Lock()
metrics = append(metrics, res)
mtx.Unlock()
}
wg.Done()
}(metric, exp)
}
for _, metric := range metrics {
wg.Add(1)
go func(metric string) {
parsedResp := monitoring.Metric{MetricName: metric}
value, err := p.client.Query(context.Background(), makeExpr(metric, *opts), ts)
if err != nil {
parsedResp.Error = err.(*apiv1.Error).Msg
} else {
parsedResp.MetricData = parseQueryResp(value)
}
mtx.Lock()
res = append(res, parsedResp)
mtx.Unlock()
wg.Done()
}(metric)
}
wg.Wait()
select {
case err := <-errCh:
return nil, err
default:
return metrics, nil
}
return res
}
func (p *prometheus) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, o monitoring.QueryOption) ([]monitoring.Metric, error) {
metrics := make([]monitoring.Metric, 0)
var mtx sync.Mutex // guard metrics
func (p prometheus) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, o monitoring.QueryOption) []monitoring.Metric {
var res []monitoring.Metric
var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
errCh := make(chan error)
for _, metric := range opts.NamedMetrics {
matched, _ := regexp.MatchString(opts.MetricFilter, metric)
if matched {
exp := makeExpression(metric, *opts)
wg.Add(1)
go func(metric, exp string) {
res, err := p.rangeQuery(exp, start, end, step)
if err != nil {
select {
case errCh <- err: // Record error once
default:
}
} else {
res.MetricName = metric // Add metric name
mtx.Lock()
metrics = append(metrics, res)
mtx.Unlock()
}
wg.Done()
}(metric, exp)
}
timeRange := apiv1.Range{
Start: start,
End: end,
Step: step,
}
for _, metric := range metrics {
wg.Add(1)
go func(metric string) {
parsedResp := monitoring.Metric{MetricName: metric}
value, err := p.client.QueryRange(context.Background(), makeExpr(metric, *opts), timeRange)
if err != nil {
parsedResp.Error = err.(*apiv1.Error).Msg
} else {
parsedResp.MetricData = parseQueryRangeResp(value)
}
mtx.Lock()
res = append(res, parsedResp)
mtx.Unlock()
wg.Done()
}(metric)
}
wg.Wait()
select {
case err := <-errCh:
return nil, err
default:
return metrics, nil
}
return res
}
func (p prometheus) query(exp string, ts time.Time) (monitoring.Metric, error) {
params := &url.Values{}
params.Set("time", ts.Format(time.RFC3339))
params.Set("query", exp)
func parseQueryRangeResp(value model.Value) monitoring.MetricData {
res := monitoring.MetricData{MetricType: monitoring.MetricTypeMatrix}
u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
data, _ := value.(model.Matrix)
var m monitoring.Metric
response, err := p.client.Get(u)
if err != nil {
return monitoring.Metric{}, err
for _, v := range data {
mv := monitoring.MetricValue{
Metadata: make(map[string]string),
}
for k, v := range v.Metric {
mv.Metadata[string(k)] = string(v)
}
for _, k := range v.Values {
mv.Series = append(mv.Series, monitoring.Point{float64(k.Timestamp) / 1000, float64(k.Value)})
}
res.MetricValues = append(res.MetricValues, mv)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return monitoring.Metric{}, err
}
defer response.Body.Close()
err = json.Unmarshal(body, m)
if err != nil {
return monitoring.Metric{}, err
}
return m, nil
return res
}
func (p prometheus) rangeQuery(exp string, start, end time.Time, step time.Duration) (monitoring.Metric, error) {
params := &url.Values{}
params.Set("start", start.Format(time.RFC3339))
params.Set("end", end.Format(time.RFC3339))
params.Set("step", step.String())
params.Set("query", exp)
func parseQueryResp(value model.Value) monitoring.MetricData {
res := monitoring.MetricData{MetricType: monitoring.MetricTypeVector}
u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
data, _ := value.(model.Vector)
var m monitoring.Metric
response, err := p.client.Get(u)
if err != nil {
return monitoring.Metric{}, err
for _, v := range data {
mv := monitoring.MetricValue{
Metadata: make(map[string]string),
}
for k, v := range v.Metric {
mv.Metadata[string(k)] = string(v)
}
mv.Sample = monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)}
res.MetricValues = append(res.MetricValues, mv)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return monitoring.Metric{}, err
}
defer response.Body.Close()
err = json.Unmarshal(body, m)
if err != nil {
return monitoring.Metric{}, err
}
return m, nil
return res
}

View File

@@ -0,0 +1,95 @@
package prometheus
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/json-iterator/go"
"io/ioutil"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestGetNamedMetrics(t *testing.T) {
tests := []struct {
name string
fakeResp string
expected string
}{
{"prom returns good values", "metrics-vector-type-prom.json", "metrics-vector-type-res.json"},
{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected, err := jsonFromFile(tt.expected)
if err != nil {
t.Fatal(err)
}
srv := mockPrometheusService("/api/v1/query", tt.fakeResp)
defer srv.Close()
client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
result := client.GetNamedMetrics([]string{"cluster_cpu_utilisation"}, time.Now(), monitoring.ClusterOption{})
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func TestGetNamedMetricsOverTime(t *testing.T) {
tests := []struct {
name string
fakeResp string
expected string
}{
{"prom returns good values", "metrics-matrix-type-prom.json", "metrics-matrix-type-res.json"},
{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected, err := jsonFromFile(tt.expected)
if err != nil {
t.Fatal(err)
}
srv := mockPrometheusService("/api/v1/query_range", tt.fakeResp)
defer srv.Close()
client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
result := client.GetNamedMetricsOverTime([]string{"cluster_cpu_utilisation"}, time.Now().Add(-time.Minute*3), time.Now(), time.Minute, monitoring.ClusterOption{})
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}
func mockPrometheusService(pattern, fakeResp string) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
b, _ := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
res.Write(b)
})
return httptest.NewServer(mux)
}
func jsonFromFile(expectedFile string) ([]monitoring.Metric, error) {
expectedJson := []monitoring.Metric{}
json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
if err != nil {
return expectedJson, err
}
err = jsoniter.Unmarshal(json, &expectedJson)
if err != nil {
return expectedJson, err
}
return expectedJson, nil
}

View File

@@ -25,7 +25,6 @@ const (
Deployment = "Deployment"
)
//TODO(huanggze): move this part to a ConfigMap
var promQLTemplates = map[string]string{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
@@ -256,31 +255,33 @@ var promQLTemplates = map[string]string{
"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}
func makeExpression(metric string, opt monitoring.QueryOptions) string {
func makeExpr(metric string, opt monitoring.QueryOptions) string {
tmpl := promQLTemplates[metric]
switch opt.Level {
case monitoring.LevelCluster:
return tmpl
case monitoring.LevelNode:
makeNodeMetricExpression(tmpl, opt)
return makeNodeMetricExpr(tmpl, opt)
case monitoring.LevelWorkspace:
makeWorkspaceMetricExpression(tmpl, opt)
return makeWorkspaceMetricExpr(tmpl, opt)
case monitoring.LevelNamespace:
makeNamespaceMetricExpression(tmpl, opt)
return makeNamespaceMetricExpr(tmpl, opt)
case monitoring.LevelWorkload:
makeWorkloadMetricExpression(tmpl, opt)
return makeWorkloadMetricExpr(tmpl, opt)
case monitoring.LevelPod:
makePodMetricExpression(tmpl, opt)
return makePodMetricExpr(tmpl, opt)
case monitoring.LevelContainer:
makeContainerMetricExpression(tmpl, opt)
return makeContainerMetricExpr(tmpl, opt)
case monitoring.LevelPVC:
makePVCMetricExpression(tmpl, opt)
return makePVCMetricExpr(tmpl, opt)
case monitoring.LevelComponent:
return tmpl
default:
return tmpl
}
return tmpl
}
func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeNodeMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var nodeSelector string
if o.NodeName != "" {
nodeSelector = fmt.Sprintf(`node="%s"`, o.NodeName)
@@ -290,7 +291,7 @@ func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.Replace(tmpl, "$1", nodeSelector, -1)
}
func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeWorkspaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var workspaceSelector string
if o.WorkspaceName != "" {
workspaceSelector = fmt.Sprintf(`label_kubesphere_io_workspace="%s"`, o.WorkspaceName)
@@ -300,7 +301,7 @@ func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", workspaceSelector, -1)
}
func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeNamespaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var namespaceSelector string
// For monitoring namespaces in the specific workspace
@@ -321,7 +322,7 @@ func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", namespaceSelector, -1)
}
func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeWorkloadMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var kindSelector, workloadSelector string
switch o.WorkloadKind {
case "deployment":
@@ -341,7 +342,7 @@ func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string
return strings.NewReplacer("$1", workloadSelector, "$2", kindSelector).Replace(tmpl)
}
func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var podSelector, workloadSelector string
// For monitoriong pods of the specific workload
@@ -371,7 +372,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
// For monitoring pods on the specific node
// GET /nodes/{node}/pods/{pod}
if o.PodName != "" {
if o.NodeName != "" {
if o.PodName != "" {
podSelector = fmt.Sprintf(`pod="%s", node="%s"`, o.PodName, o.NodeName)
} else {
@@ -381,7 +382,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.NewReplacer("$1", workloadSelector, "$2", podSelector).Replace(tmpl)
}
func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeContainerMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var containerSelector string
if o.ContainerName != "" {
containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name="%s"`, o.PodName, o.NamespaceName, o.ContainerName)
@@ -391,7 +392,7 @@ func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", containerSelector, -1)
}
func makePVCMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makePVCMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var pvcSelector string
// For monitoring persistentvolumeclaims in the specific namespace

View File

@@ -0,0 +1,45 @@
package prometheus
import (
"github.com/google/go-cmp/cmp"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus/testdata"
"testing"
)
func TestMakeExpr(t *testing.T) {
tests := []struct {
name string
opt monitoring.QueryOptions
}{
{"cluster_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelCluster}},
{"node_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}},
{"node_cpu_total", monitoring.QueryOptions{Level: monitoring.LevelNode, ResourceFilter: "i-2dazc1d6|i-ezjb7gsk"}},
{"workspace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, WorkspaceName: "system-workspace"}},
{"workspace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, ResourceFilter: "system-workspace|demo"}},
{"namespace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, NamespaceName: "kube-system"}},
{"namespace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, ResourceFilter: "kube-system|default"}},
{"namespace_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelNamespace, WorkspaceName: "system-workspace", ResourceFilter: "kube-system|default"}},
{"workload_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: "deployment", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
{"workload_deployment_replica_available", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: ".*", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
{"pod_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", WorkloadKind: "deployment", WorkloadName: "elasticsearch", ResourceFilter: "elasticsearch-0"}},
{"pod_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", PodName: "elasticsearch-12345"}},
{"pod_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelPod, NodeName: "i-2dazc1d6", PodName: "elasticsearch-12345"}},
{"container_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ContainerName: "syscall"}},
{"container_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ResourceFilter: "syscall"}},
{"pvc_inodes_available", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", PersistentVolumeClaimName: "db-123"}},
{"pvc_inodes_used", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", ResourceFilter: "db-123"}},
{"pvc_inodes_total", monitoring.QueryOptions{Level: monitoring.LevelPVC, StorageClassName: "default", ResourceFilter: "db-123"}},
{"etcd_server_list", monitoring.QueryOptions{Level: monitoring.LevelComponent}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
expected := testdata.PromQLs[tt.name]
result := makeExpr(tt.name, tt.opt)
if diff := cmp.Diff(result, expected); diff != "" {
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
}
})
}
}

View File

@@ -0,0 +1,5 @@
{
"status":"error",
"errorType":"internal",
"error":"inconsistent body for response code"
}

View File

@@ -0,0 +1,6 @@
[
{
"metric_name": "cluster_cpu_utilisation",
"error": "inconsistent body for response code"
}
]

View File

@@ -0,0 +1,206 @@
{
"status":"success",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
}
]
}
}

View File

@@ -0,0 +1,208 @@
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
}
]
}
}
]

View File

@@ -0,0 +1,68 @@
{
"status":"success",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
}
]
}
}

View File

@@ -0,0 +1,70 @@
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
}
]
}
}
]

View File

@@ -0,0 +1,23 @@
package testdata
// PromQLs maps a metric name to the golden PromQL expression that the
// expression builder (makeExpr) is expected to produce for the matching
// QueryOptions in the tests. Comparisons are byte-for-byte golden-string
// checks, so do not reformat or re-wrap these values.
var PromQLs = map[string]string{
	"cluster_cpu_utilisation": `:node_cpu_utilisation:avg1m`,
	"node_cpu_utilisation": `node:node_cpu_utilisation:avg1m{node="i-2dazc1d6"}`,
	"node_cpu_total": `node:node_num_cpu:sum{node=~"i-2dazc1d6|i-ezjb7gsk"}`,
	"workspace_cpu_usage": `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", label_kubesphere_io_workspace="system-workspace"}), 0.001)`,
	"workspace_memory_usage": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", label_kubesphere_io_workspace=~"system-workspace|demo", label_kubesphere_io_workspace!=""})`,
	"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="kube-system"}, 0.001)`,
	"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"kube-system|default"}`,
	"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", label_kubesphere_io_workspace="system-workspace", namespace=~"kube-system|default"}`,
	"workload_cpu_usage": `round(namespace:workload_cpu_usage:sum{namespace="default", workload=~"Deployment:apiserver|coredns"}, 0.001)`,
	"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
	"pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^deployment-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`,
	"pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`,
	"pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`,
	"container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name="syscall"}[5m])), 0.001)`,
	"container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name=~"syscall"})`,
	"pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim="db-123"}`,
	"pvc_inodes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim=~"db-123"}`,
	"pvc_inodes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{storageclass="default", persistentvolumeclaim=~"db-123"}`,
	"etcd_server_list": `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`,
}

View File

@@ -1,14 +1,26 @@
package monitoring
// Level identifies the granularity of a monitoring query target
// (cluster, node, workspace, ... component).
type Level int

// Monitoring levels. Each value occupies its own bit (1 << iota), so
// levels can be combined or tested as flags.
const (
	LevelCluster = 1 << iota
	LevelNode
	LevelWorkspace
	LevelNamespace
	LevelWorkload
	LevelPod
	LevelContainer
	LevelPVC
	LevelComponent
)
// QueryOption mutates a QueryOptions in place. Concrete option types
// (e.g. ClusterOption, NodeOption) implement it to select the
// monitoring level and populate the corresponding filter fields.
type QueryOption interface {
	Apply(*QueryOptions)
}
type QueryOptions struct {
Level MonitoringLevel
NamedMetrics []string
Level Level
MetricFilter string
ResourceFilter string
NodeName string
WorkspaceName string
@@ -25,44 +37,35 @@ func NewQueryOptions() *QueryOptions {
return &QueryOptions{}
}
type ClusterOption struct {
MetricFilter string
}
type ClusterOption struct{}
func (co ClusterOption) Apply(o *QueryOptions) {
func (_ ClusterOption) Apply(o *QueryOptions) {
o.Level = LevelCluster
o.NamedMetrics = ClusterMetrics
}
type NodeOption struct {
MetricFilter string
ResourceFilter string
NodeName string
}
func (no NodeOption) Apply(o *QueryOptions) {
o.Level = LevelNode
o.NamedMetrics = NodeMetrics
o.ResourceFilter = no.ResourceFilter
o.NodeName = no.NodeName
}
type WorkspaceOption struct {
MetricFilter string
ResourceFilter string
WorkspaceName string
}
func (wo WorkspaceOption) Apply(o *QueryOptions) {
o.Level = LevelWorkspace
o.NamedMetrics = WorkspaceMetrics
o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.WorkspaceName = wo.WorkspaceName
}
type NamespaceOption struct {
MetricFilter string
ResourceFilter string
WorkspaceName string
NamespaceName string
@@ -70,33 +73,25 @@ type NamespaceOption struct {
func (no NamespaceOption) Apply(o *QueryOptions) {
o.Level = LevelNamespace
o.NamedMetrics = NamespaceMetrics
o.MetricFilter = no.MetricFilter
o.ResourceFilter = no.ResourceFilter
o.WorkspaceName = no.WorkspaceName
o.NamespaceName = no.NamespaceName
}
type WorkloadOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
WorkloadKind string
WorkloadName string
}
func (wo WorkloadOption) Apply(o *QueryOptions) {
o.Level = LevelWorkload
o.NamedMetrics = WorkspaceMetrics
o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.NamespaceName = wo.NamespaceName
o.WorkloadKind = wo.WorkloadKind
o.WorkloadName = wo.WorkloadName
}
type PodOption struct {
MetricFilter string
ResourceFilter string
NodeName string
NamespaceName string
@@ -107,8 +102,6 @@ type PodOption struct {
func (po PodOption) Apply(o *QueryOptions) {
o.Level = LevelPod
o.NamedMetrics = PodMetrics
o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.WorkloadKind = po.WorkloadKind
@@ -116,7 +109,6 @@ func (po PodOption) Apply(o *QueryOptions) {
}
type ContainerOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
PodName string
@@ -125,8 +117,6 @@ type ContainerOption struct {
func (co ContainerOption) Apply(o *QueryOptions) {
o.Level = LevelContainer
o.NamedMetrics = ContainerMetrics
o.MetricFilter = co.MetricFilter
o.ResourceFilter = co.ResourceFilter
o.NamespaceName = co.NamespaceName
o.PodName = co.PodName
@@ -134,7 +124,6 @@ func (co ContainerOption) Apply(o *QueryOptions) {
}
type PVCOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
StorageClassName string
@@ -143,20 +132,14 @@ type PVCOption struct {
func (po PVCOption) Apply(o *QueryOptions) {
o.Level = LevelPVC
o.NamedMetrics = PVCMetrics
o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.StorageClassName = po.StorageClassName
o.PersistentVolumeClaimName = po.PersistentVolumeClaimName
}
type ComponentOption struct {
MetricFilter string
}
type ComponentOption struct{}
func (co ComponentOption) Apply(o *QueryOptions) {
func (_ ComponentOption) Apply(o *QueryOptions) {
o.Level = LevelComponent
o.NamedMetrics = ComponentMetrics
o.MetricFilter = co.MetricFilter
}

View File

@@ -0,0 +1,33 @@
package monitoring
// Prometheus query result types, carried in MetricData.MetricType.
const (
	// MetricTypeMatrix denotes a range-query result: a series of
	// [timestamp, value] points per time series.
	MetricTypeMatrix = "matrix"
	// MetricTypeVector denotes an instant-query result: a single
	// [timestamp, value] sample per time series.
	MetricTypeVector = "vector"
)
// Metric is one named metric in an API response, pairing the metric
// name with its query result or a per-metric error message.
type Metric struct {
	MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
	// MetricData is embedded so the query result serializes under the "data" key.
	MetricData `json:"data,omitempty" description:"actual metric result"`
	// Error carries the failure message when the query for this metric failed.
	Error string `json:"error,omitempty"`
}
// MetricData mirrors the "data" object of a Prometheus query response:
// the result type plus the list of matched time series.
type MetricData struct {
	MetricType string `json:"resultType,omitempty" description:"result type, one of matrix, vector"`
	MetricValues []MetricValue `json:"result,omitempty" description:"metric data including labels, time series and values"`
}
// Point is a single Prometheus sample encoded as a [timestamp, value]
// pair, matching the wire format of Prometheus query results.
type Point [2]float64

// MetricValue is one time series in a query result: its label set plus
// either a single sample (vector results) or a series of samples
// (matrix results).
type MetricValue struct {
	Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
	Sample   Point             `json:"value,omitempty" description:"time series, values of vector type"`
	Series   []Point           `json:"values,omitempty" description:"time series, values of matrix type"`
}

// Timestamp returns the sample's time — the first element of the pair.
func (p Point) Timestamp() float64 { return p[0] }

// Value returns the sample's measurement — the second element of the pair.
func (p Point) Value() float64 { return p[1] }