monitor: add tests

Signed-off-by: huanggze <loganhuang@yunify.com>
This commit is contained in:
zryfish
2020-04-01 17:41:50 +08:00
committed by huanggze
parent 17013d3519
commit 372a52e70e
63 changed files with 5405 additions and 4462 deletions

View File

@@ -170,7 +170,9 @@ func (d *FakeDevops) GetCredentialInProject(projectId, id string, content bool)
// GetCredentialsInProject is a no-op stub on the fake devops client;
// it always returns (nil, nil).
func (d *FakeDevops) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) {
return nil, nil
}
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) { return nil, nil }
func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) {
return nil, nil
}
// BuildGetter
func (d *FakeDevops) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) {

View File

@@ -2,40 +2,9 @@ package monitoring
import "time"
// Result status and result type values carried by Metric.Status and
// MetricData.MetricType (they match the "status" and "resultType" fields
// of the monitoring backend's JSON responses).
const (
StatusSuccess = "success"
StatusError = "error"
MetricTypeMatrix = "matrix"
MetricTypeVector = "vector"
)
// Metric is the envelope for one named metric's query result: either the
// data payload (embedded MetricData) or the error reported by the backend.
type Metric struct {
MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
Status string `json:"status" description:"result status, one of error, success"`
MetricData `json:"data" description:"actual metric result"`
ErrorType string `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
}
// MetricData mirrors the "data" object of the backend response: the result
// type (matrix or vector) plus the individual time series.
type MetricData struct {
MetricType string `json:"resultType" description:"result type, one of matrix, vector"`
MetricValues []MetricValue `json:"result" description:"metric data including labels, time series and values"`
}
// Point is a single sample encoded as [timestamp in seconds, value].
type Point [2]float64
// MetricValue is one time series: its labels plus either a single sample
// (vector results) or a series of points (matrix results).
type MetricValue struct {
Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
Sample Point `json:"value,omitempty" description:"time series, values of vector type"`
Series []Point `json:"values,omitempty" description:"time series, values of matrix type"`
}
type Interface interface {
// The `stmts` defines statements, expressions or rules (eg. promql in Prometheus) for querying specific metrics.
GetMetrics(stmts []string, time time.Time) ([]Metric, error)
GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]Metric, error)
// Get named metrics (eg. node_cpu_usage)
GetNamedMetrics(time time.Time, opt QueryOption) ([]Metric, error)
GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt QueryOption) ([]Metric, error)
GetMetrics(exprs []string, time time.Time) []Metric
GetMetricsOverTime(exprs []string, start, end time.Time, step time.Duration) []Metric
GetNamedMetrics(metrics []string, time time.Time, opt QueryOption) []Metric
GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt QueryOption) []Metric
}

View File

@@ -1,252 +0,0 @@
package monitoring
// MonitoringLevel identifies the resource granularity a metrics query
// targets (cluster, node, workspace, ...).
type MonitoringLevel int
// The levels are distinct bit flags (1 << iota), so several levels can be
// combined into a single bitmask.
const (
LevelCluster = MonitoringLevel(1) << iota
LevelNode
LevelWorkspace
LevelNamespace
LevelWorkload
LevelPod
LevelContainer
LevelPVC
LevelComponent
)
// ClusterMetrics lists every supported cluster-level metric name
// (LevelCluster queries).
var ClusterMetrics = []string{
"cluster_cpu_utilisation",
"cluster_cpu_usage",
"cluster_cpu_total",
"cluster_memory_utilisation",
"cluster_memory_available",
"cluster_memory_total",
"cluster_memory_usage_wo_cache",
"cluster_net_utilisation",
"cluster_net_bytes_transmitted",
"cluster_net_bytes_received",
"cluster_disk_read_iops",
"cluster_disk_write_iops",
"cluster_disk_read_throughput",
"cluster_disk_write_throughput",
"cluster_disk_size_usage",
"cluster_disk_size_utilisation",
"cluster_disk_size_capacity",
"cluster_disk_size_available",
"cluster_disk_inode_total",
"cluster_disk_inode_usage",
"cluster_disk_inode_utilisation",
"cluster_namespace_count",
"cluster_pod_count",
"cluster_pod_quota",
"cluster_pod_utilisation",
"cluster_pod_running_count",
"cluster_pod_succeeded_count",
"cluster_pod_abnormal_count",
"cluster_node_online",
"cluster_node_offline",
"cluster_node_total",
"cluster_cronjob_count",
"cluster_pvc_count",
"cluster_daemonset_count",
"cluster_deployment_count",
"cluster_endpoint_count",
"cluster_hpa_count",
"cluster_job_count",
"cluster_statefulset_count",
"cluster_replicaset_count",
"cluster_service_count",
"cluster_secret_count",
"cluster_pv_count",
"cluster_ingresses_extensions_count",
"cluster_load1",
"cluster_load5",
"cluster_load15",
"cluster_pod_abnormal_ratio",
"cluster_node_offline_ratio",
}
// NodeMetrics lists every supported node-level metric name
// (LevelNode queries).
var NodeMetrics = []string{
"node_cpu_utilisation",
"node_cpu_total",
"node_cpu_usage",
"node_memory_utilisation",
"node_memory_usage_wo_cache",
"node_memory_available",
"node_memory_total",
"node_net_utilisation",
"node_net_bytes_transmitted",
"node_net_bytes_received",
"node_disk_read_iops",
"node_disk_write_iops",
"node_disk_read_throughput",
"node_disk_write_throughput",
"node_disk_size_capacity",
"node_disk_size_available",
"node_disk_size_usage",
"node_disk_size_utilisation",
"node_disk_inode_total",
"node_disk_inode_usage",
"node_disk_inode_utilisation",
"node_pod_count",
"node_pod_quota",
"node_pod_utilisation",
"node_pod_running_count",
"node_pod_succeeded_count",
"node_pod_abnormal_count",
"node_load1",
"node_load5",
"node_load15",
"node_pod_abnormal_ratio",
}
// WorkspaceMetrics lists every supported workspace-level metric name
// (LevelWorkspace queries).
var WorkspaceMetrics = []string{
"workspace_cpu_usage",
"workspace_memory_usage",
"workspace_memory_usage_wo_cache",
"workspace_net_bytes_transmitted",
"workspace_net_bytes_received",
"workspace_pod_count",
"workspace_pod_running_count",
"workspace_pod_succeeded_count",
"workspace_pod_abnormal_count",
"workspace_ingresses_extensions_count",
"workspace_cronjob_count",
"workspace_pvc_count",
"workspace_daemonset_count",
"workspace_deployment_count",
"workspace_endpoint_count",
"workspace_hpa_count",
"workspace_job_count",
"workspace_statefulset_count",
"workspace_replicaset_count",
"workspace_service_count",
"workspace_secret_count",
"workspace_pod_abnormal_ratio",
}
// NamespaceMetrics lists every supported namespace-level metric name
// (LevelNamespace queries).
var NamespaceMetrics = []string{
"namespace_cpu_usage",
"namespace_memory_usage",
"namespace_memory_usage_wo_cache",
"namespace_net_bytes_transmitted",
"namespace_net_bytes_received",
"namespace_pod_count",
"namespace_pod_running_count",
"namespace_pod_succeeded_count",
"namespace_pod_abnormal_count",
"namespace_pod_abnormal_ratio",
"namespace_memory_limit_hard",
"namespace_cpu_limit_hard",
"namespace_pod_count_hard",
"namespace_cronjob_count",
"namespace_pvc_count",
"namespace_daemonset_count",
"namespace_deployment_count",
"namespace_endpoint_count",
"namespace_hpa_count",
"namespace_job_count",
"namespace_statefulset_count",
"namespace_replicaset_count",
"namespace_service_count",
"namespace_secret_count",
"namespace_configmap_count",
"namespace_ingresses_extensions_count",
"namespace_s2ibuilder_count",
}
// WorkloadMetrics lists every supported workload-level metric name
// (LevelWorkload queries: deployments, statefulsets, daemonsets).
var WorkloadMetrics = []string{
"workload_cpu_usage",
"workload_memory_usage",
"workload_memory_usage_wo_cache",
"workload_net_bytes_transmitted",
"workload_net_bytes_received",
"workload_deployment_replica",
"workload_deployment_replica_available",
"workload_statefulset_replica",
"workload_statefulset_replica_available",
"workload_daemonset_replica",
"workload_daemonset_replica_available",
"workload_deployment_unavailable_replicas_ratio",
"workload_daemonset_unavailable_replicas_ratio",
"workload_statefulset_unavailable_replicas_ratio",
}
// PodMetrics lists every supported pod-level metric name (LevelPod queries).
var PodMetrics = []string{
"pod_cpu_usage",
"pod_memory_usage",
"pod_memory_usage_wo_cache",
"pod_net_bytes_transmitted",
"pod_net_bytes_received",
}
// ContainerMetrics lists every supported container-level metric name
// (LevelContainer queries).
var ContainerMetrics = []string{
"container_cpu_usage",
"container_memory_usage",
"container_memory_usage_wo_cache",
}
// PVCMetrics lists every supported persistent-volume-claim metric name
// (LevelPVC queries).
var PVCMetrics = []string{
"pvc_inodes_available",
"pvc_inodes_used",
"pvc_inodes_total",
"pvc_inodes_utilisation",
"pvc_bytes_available",
"pvc_bytes_used",
"pvc_bytes_total",
"pvc_bytes_utilisation",
}
// ComponentMetrics lists every supported control-plane component metric name
// (LevelComponent queries: etcd, apiserver, scheduler, controller-manager,
// coredns, prometheus).
var ComponentMetrics = []string{
"etcd_server_list",
"etcd_server_total",
"etcd_server_up_total",
"etcd_server_has_leader",
"etcd_server_leader_changes",
"etcd_server_proposals_failed_rate",
"etcd_server_proposals_applied_rate",
"etcd_server_proposals_committed_rate",
"etcd_server_proposals_pending_count",
"etcd_mvcc_db_size",
"etcd_network_client_grpc_received_bytes",
"etcd_network_client_grpc_sent_bytes",
"etcd_grpc_call_rate",
"etcd_grpc_call_failed_rate",
"etcd_grpc_server_msg_received_rate",
"etcd_grpc_server_msg_sent_rate",
"etcd_disk_wal_fsync_duration",
"etcd_disk_wal_fsync_duration_quantile",
"etcd_disk_backend_commit_duration",
"etcd_disk_backend_commit_duration_quantile",
"apiserver_up_sum",
"apiserver_request_rate",
"apiserver_request_by_verb_rate",
"apiserver_request_latencies",
"apiserver_request_by_verb_latencies",
"scheduler_up_sum",
"scheduler_schedule_attempts",
"scheduler_schedule_attempt_rate",
"scheduler_e2e_scheduling_latency",
"scheduler_e2e_scheduling_latency_quantile",
"controller_manager_up_sum",
"coredns_up_sum",
"coredns_cache_hits",
"coredns_cache_misses",
"coredns_dns_request_rate",
"coredns_dns_request_duration",
"coredns_dns_request_duration_quantile",
"coredns_dns_request_by_type_rate",
"coredns_dns_request_by_rcode_rate",
"coredns_panic_rate",
"coredns_proxy_request_rate",
"coredns_proxy_request_duration",
"coredns_proxy_request_duration_quantile",
"prometheus_up_sum",
"prometheus_tsdb_head_samples_appended_rate",
}

View File

@@ -1,178 +1,153 @@
package prometheus
import (
"fmt"
"github.com/json-iterator/go"
"io/ioutil"
"context"
"github.com/prometheus/client_golang/api"
apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
"net/url"
"regexp"
"sync"
"time"
)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// prometheus implements monitoring interface backed by Prometheus
type prometheus struct {
options *Options
client *http.Client
client apiv1.API
}
func NewPrometheus(options *Options) monitoring.Interface {
return &prometheus{
options: options,
client: &http.Client{Timeout: 10 * time.Second},
func NewPrometheus(options *Options) (monitoring.Interface, error) {
cfg := api.Config{
Address: options.Endpoint,
}
client, err := api.NewClient(cfg)
return prometheus{client: apiv1.NewAPI(client)}, err
}
// TODO(huanggze): reserve for custom monitoring
func (p *prometheus) GetMetrics(stmts []string, time time.Time) ([]monitoring.Metric, error) {
func (p prometheus) GetMetrics(stmts []string, time time.Time) []monitoring.Metric {
panic("implement me")
}
// TODO(huanggze): reserve for custom monitoring
func (p *prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]monitoring.Metric, error) {
func (p prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) []monitoring.Metric {
panic("implement me")
}
func (p *prometheus) GetNamedMetrics(ts time.Time, o monitoring.QueryOption) ([]monitoring.Metric, error) {
metrics := make([]monitoring.Metric, 0)
var mtx sync.Mutex // guard metrics
func (p prometheus) GetNamedMetrics(metrics []string, ts time.Time, o monitoring.QueryOption) []monitoring.Metric {
var res []monitoring.Metric
var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
errCh := make(chan error)
for _, metric := range opts.NamedMetrics {
matched, _ := regexp.MatchString(opts.MetricFilter, metric)
if matched {
exp := makeExpression(metric, *opts)
wg.Add(1)
go func(metric, exp string) {
res, err := p.query(exp, ts)
if err != nil {
select {
case errCh <- err: // Record error once
default:
}
} else {
res.MetricName = metric // Add metric name
mtx.Lock()
metrics = append(metrics, res)
mtx.Unlock()
}
wg.Done()
}(metric, exp)
}
for _, metric := range metrics {
wg.Add(1)
go func(metric string) {
parsedResp := monitoring.Metric{MetricName: metric}
value, err := p.client.Query(context.Background(), makeExpr(metric, *opts), ts)
if err != nil {
parsedResp.Error = err.(*apiv1.Error).Msg
} else {
parsedResp.MetricData = parseQueryResp(value)
}
mtx.Lock()
res = append(res, parsedResp)
mtx.Unlock()
wg.Done()
}(metric)
}
wg.Wait()
select {
case err := <-errCh:
return nil, err
default:
return metrics, nil
}
return res
}
func (p *prometheus) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, o monitoring.QueryOption) ([]monitoring.Metric, error) {
metrics := make([]monitoring.Metric, 0)
var mtx sync.Mutex // guard metrics
func (p prometheus) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, o monitoring.QueryOption) []monitoring.Metric {
var res []monitoring.Metric
var mtx sync.Mutex
var wg sync.WaitGroup
opts := monitoring.NewQueryOptions()
o.Apply(opts)
errCh := make(chan error)
for _, metric := range opts.NamedMetrics {
matched, _ := regexp.MatchString(opts.MetricFilter, metric)
if matched {
exp := makeExpression(metric, *opts)
wg.Add(1)
go func(metric, exp string) {
res, err := p.rangeQuery(exp, start, end, step)
if err != nil {
select {
case errCh <- err: // Record error once
default:
}
} else {
res.MetricName = metric // Add metric name
mtx.Lock()
metrics = append(metrics, res)
mtx.Unlock()
}
wg.Done()
}(metric, exp)
}
timeRange := apiv1.Range{
Start: start,
End: end,
Step: step,
}
for _, metric := range metrics {
wg.Add(1)
go func(metric string) {
parsedResp := monitoring.Metric{MetricName: metric}
value, err := p.client.QueryRange(context.Background(), makeExpr(metric, *opts), timeRange)
if err != nil {
parsedResp.Error = err.(*apiv1.Error).Msg
} else {
parsedResp.MetricData = parseQueryRangeResp(value)
}
mtx.Lock()
res = append(res, parsedResp)
mtx.Unlock()
wg.Done()
}(metric)
}
wg.Wait()
select {
case err := <-errCh:
return nil, err
default:
return metrics, nil
}
return res
}
func (p prometheus) query(exp string, ts time.Time) (monitoring.Metric, error) {
params := &url.Values{}
params.Set("time", ts.Format(time.RFC3339))
params.Set("query", exp)
func parseQueryRangeResp(value model.Value) monitoring.MetricData {
res := monitoring.MetricData{MetricType: monitoring.MetricTypeMatrix}
u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
data, _ := value.(model.Matrix)
var m monitoring.Metric
response, err := p.client.Get(u)
if err != nil {
return monitoring.Metric{}, err
for _, v := range data {
mv := monitoring.MetricValue{
Metadata: make(map[string]string),
}
for k, v := range v.Metric {
mv.Metadata[string(k)] = string(v)
}
for _, k := range v.Values {
mv.Series = append(mv.Series, monitoring.Point{float64(k.Timestamp) / 1000, float64(k.Value)})
}
res.MetricValues = append(res.MetricValues, mv)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return monitoring.Metric{}, err
}
defer response.Body.Close()
err = json.Unmarshal(body, m)
if err != nil {
return monitoring.Metric{}, err
}
return m, nil
return res
}
func (p prometheus) rangeQuery(exp string, start, end time.Time, step time.Duration) (monitoring.Metric, error) {
params := &url.Values{}
params.Set("start", start.Format(time.RFC3339))
params.Set("end", end.Format(time.RFC3339))
params.Set("step", step.String())
params.Set("query", exp)
func parseQueryResp(value model.Value) monitoring.MetricData {
res := monitoring.MetricData{MetricType: monitoring.MetricTypeVector}
u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode())
data, _ := value.(model.Vector)
var m monitoring.Metric
response, err := p.client.Get(u)
if err != nil {
return monitoring.Metric{}, err
for _, v := range data {
mv := monitoring.MetricValue{
Metadata: make(map[string]string),
}
for k, v := range v.Metric {
mv.Metadata[string(k)] = string(v)
}
mv.Sample = monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)}
res.MetricValues = append(res.MetricValues, mv)
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return monitoring.Metric{}, err
}
defer response.Body.Close()
err = json.Unmarshal(body, m)
if err != nil {
return monitoring.Metric{}, err
}
return m, nil
return res
}

View File

@@ -0,0 +1,95 @@
package prometheus
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/json-iterator/go"
"io/ioutil"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestGetNamedMetrics verifies instant-vector queries against a mocked
// Prometheus /api/v1/query endpoint, comparing the parsed result with the
// expected fixture.
func TestGetNamedMetrics(t *testing.T) {
	cases := []struct {
		name     string
		fakeResp string
		expected string
	}{
		{"prom returns good values", "metrics-vector-type-prom.json", "metrics-vector-type-res.json"},
		{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			want, err := jsonFromFile(c.expected)
			if err != nil {
				t.Fatal(err)
			}

			srv := mockPrometheusService("/api/v1/query", c.fakeResp)
			defer srv.Close()

			client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
			got := client.GetNamedMetrics([]string{"cluster_cpu_utilisation"}, time.Now(), monitoring.ClusterOption{})

			if diff := cmp.Diff(got, want); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", want, diff)
			}
		})
	}
}
// TestGetNamedMetricsOverTime verifies range (matrix) queries against a
// mocked Prometheus /api/v1/query_range endpoint, comparing the parsed
// result with the expected fixture.
func TestGetNamedMetricsOverTime(t *testing.T) {
	cases := []struct {
		name     string
		fakeResp string
		expected string
	}{
		{"prom returns good values", "metrics-matrix-type-prom.json", "metrics-matrix-type-res.json"},
		{"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			want, err := jsonFromFile(c.expected)
			if err != nil {
				t.Fatal(err)
			}

			srv := mockPrometheusService("/api/v1/query_range", c.fakeResp)
			defer srv.Close()

			client, _ := NewPrometheus(&Options{Endpoint: srv.URL})
			got := client.GetNamedMetricsOverTime([]string{"cluster_cpu_utilisation"}, time.Now().Add(-time.Minute*3), time.Now(), time.Minute, monitoring.ClusterOption{})

			if diff := cmp.Diff(got, want); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", want, diff)
			}
		})
	}
}
func mockPrometheusService(pattern, fakeResp string) *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
b, _ := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
res.Write(b)
})
return httptest.NewServer(mux)
}
// jsonFromFile loads ./testdata/<expectedFile> and decodes it into a slice
// of monitoring.Metric. On error it returns the (possibly empty) slice
// decoded so far together with the error.
func jsonFromFile(expectedFile string) ([]monitoring.Metric, error) {
	expected := []monitoring.Metric{}
	// Named "raw" rather than "json" so it does not shadow the package-level
	// `var json = jsoniter.ConfigCompatibleWithStandardLibrary`.
	raw, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
	if err != nil {
		return expected, err
	}
	if err := jsoniter.Unmarshal(raw, &expected); err != nil {
		return expected, err
	}
	return expected, nil
}

View File

@@ -25,7 +25,6 @@ const (
Deployment = "Deployment"
)
//TODO(huanggze): move this part to a ConfigMap
var promQLTemplates = map[string]string{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
@@ -256,31 +255,33 @@ var promQLTemplates = map[string]string{
"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}
func makeExpression(metric string, opt monitoring.QueryOptions) string {
func makeExpr(metric string, opt monitoring.QueryOptions) string {
tmpl := promQLTemplates[metric]
switch opt.Level {
case monitoring.LevelCluster:
return tmpl
case monitoring.LevelNode:
makeNodeMetricExpression(tmpl, opt)
return makeNodeMetricExpr(tmpl, opt)
case monitoring.LevelWorkspace:
makeWorkspaceMetricExpression(tmpl, opt)
return makeWorkspaceMetricExpr(tmpl, opt)
case monitoring.LevelNamespace:
makeNamespaceMetricExpression(tmpl, opt)
return makeNamespaceMetricExpr(tmpl, opt)
case monitoring.LevelWorkload:
makeWorkloadMetricExpression(tmpl, opt)
return makeWorkloadMetricExpr(tmpl, opt)
case monitoring.LevelPod:
makePodMetricExpression(tmpl, opt)
return makePodMetricExpr(tmpl, opt)
case monitoring.LevelContainer:
makeContainerMetricExpression(tmpl, opt)
return makeContainerMetricExpr(tmpl, opt)
case monitoring.LevelPVC:
makePVCMetricExpression(tmpl, opt)
return makePVCMetricExpr(tmpl, opt)
case monitoring.LevelComponent:
return tmpl
default:
return tmpl
}
return tmpl
}
func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeNodeMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var nodeSelector string
if o.NodeName != "" {
nodeSelector = fmt.Sprintf(`node="%s"`, o.NodeName)
@@ -290,7 +291,7 @@ func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.Replace(tmpl, "$1", nodeSelector, -1)
}
func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeWorkspaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var workspaceSelector string
if o.WorkspaceName != "" {
workspaceSelector = fmt.Sprintf(`label_kubesphere_io_workspace="%s"`, o.WorkspaceName)
@@ -300,7 +301,7 @@ func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", workspaceSelector, -1)
}
func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeNamespaceMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var namespaceSelector string
// For monitoring namespaces in the specific workspace
@@ -321,7 +322,7 @@ func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", namespaceSelector, -1)
}
func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeWorkloadMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var kindSelector, workloadSelector string
switch o.WorkloadKind {
case "deployment":
@@ -341,7 +342,7 @@ func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string
return strings.NewReplacer("$1", workloadSelector, "$2", kindSelector).Replace(tmpl)
}
func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var podSelector, workloadSelector string
// For monitoring pods of the specific workload
@@ -371,7 +372,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
// For monitoring pods on the specific node
// GET /nodes/{node}/pods/{pod}
if o.PodName != "" {
if o.NodeName != "" {
if o.PodName != "" {
podSelector = fmt.Sprintf(`pod="%s", node="%s"`, o.PodName, o.NodeName)
} else {
@@ -381,7 +382,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string {
return strings.NewReplacer("$1", workloadSelector, "$2", podSelector).Replace(tmpl)
}
func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makeContainerMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var containerSelector string
if o.ContainerName != "" {
containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name="%s"`, o.PodName, o.NamespaceName, o.ContainerName)
@@ -391,7 +392,7 @@ func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) strin
return strings.Replace(tmpl, "$1", containerSelector, -1)
}
func makePVCMetricExpression(tmpl string, o monitoring.QueryOptions) string {
func makePVCMetricExpr(tmpl string, o monitoring.QueryOptions) string {
var pvcSelector string
// For monitoring persistentvolumeclaims in the specific namespace

View File

@@ -0,0 +1,45 @@
package prometheus
import (
"github.com/google/go-cmp/cmp"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus/testdata"
"testing"
)
// TestMakeExpr checks that makeExpr renders the expected PromQL for each
// monitoring level and option combination, using golden queries from the
// testdata package.
func TestMakeExpr(t *testing.T) {
	cases := []struct {
		name string
		opt  monitoring.QueryOptions
	}{
		{"cluster_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelCluster}},
		{"node_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}},
		{"node_cpu_total", monitoring.QueryOptions{Level: monitoring.LevelNode, ResourceFilter: "i-2dazc1d6|i-ezjb7gsk"}},
		{"workspace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, WorkspaceName: "system-workspace"}},
		{"workspace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, ResourceFilter: "system-workspace|demo"}},
		{"namespace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, NamespaceName: "kube-system"}},
		{"namespace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, ResourceFilter: "kube-system|default"}},
		{"namespace_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelNamespace, WorkspaceName: "system-workspace", ResourceFilter: "kube-system|default"}},
		{"workload_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: "deployment", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
		{"workload_deployment_replica_available", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: ".*", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}},
		{"pod_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", WorkloadKind: "deployment", WorkloadName: "elasticsearch", ResourceFilter: "elasticsearch-0"}},
		{"pod_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", PodName: "elasticsearch-12345"}},
		{"pod_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelPod, NodeName: "i-2dazc1d6", PodName: "elasticsearch-12345"}},
		{"container_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ContainerName: "syscall"}},
		{"container_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ResourceFilter: "syscall"}},
		{"pvc_inodes_available", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", PersistentVolumeClaimName: "db-123"}},
		{"pvc_inodes_used", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", ResourceFilter: "db-123"}},
		{"pvc_inodes_total", monitoring.QueryOptions{Level: monitoring.LevelPVC, StorageClassName: "default", ResourceFilter: "db-123"}},
		{"etcd_server_list", monitoring.QueryOptions{Level: monitoring.LevelComponent}},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			want := testdata.PromQLs[c.name]
			got := makeExpr(c.name, c.opt)
			if diff := cmp.Diff(got, want); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", want, diff)
			}
		})
	}
}

View File

@@ -0,0 +1,5 @@
{
"status":"error",
"errorType":"internal",
"error":"inconsistent body for response code"
}

View File

@@ -0,0 +1,6 @@
[
{
"metric_name": "cluster_cpu_utilisation",
"error": "inconsistent body for response code"
}
]

View File

@@ -0,0 +1,206 @@
{
"status":"success",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
"1.123456"
],
[
1585744045,
"1.123456"
],
[
1585744165,
"1.123456"
],
[
1585744285,
"1.123456"
],
[
1585744405,
"1.123456"
]
]
}
]
}
}

View File

@@ -0,0 +1,208 @@
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"mysql-exporter",
"instance":"10.233.99.71:9104",
"job":"mysql-sz197k-prometheus-mysql-exporter",
"namespace":"exporter",
"pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9",
"service":"mysql-sz197k-prometheus-mysql-exporter"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
},
{
"metric":{
"__name__":"up",
"endpoint":"web",
"instance":"10.233.99.22:9090",
"job":"prometheus-k8s-system",
"namespace":"kubesphere-monitoring-system",
"pod":"prometheus-k8s-system-0",
"service":"prometheus-k8s-system"
},
"values":[
[
1585743925,
1.123456
],
[
1585744045,
1.123456
],
[
1585744165,
1.123456
],
[
1585744285,
1.123456
],
[
1585744405,
1.123456
]
]
}
]
}
}
]

View File

@@ -0,0 +1,68 @@
{
"status":"success",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
"1.123456"
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
"1.123456"
]
}
]
}
}

View File

@@ -0,0 +1,70 @@
[
{
"metric_name":"cluster_cpu_utilisation",
"data":{
"resultType":"vector",
"result":[
{
"metric":{
"__name__":"up",
"endpoint":"https",
"instance":"192.168.2.2:9100",
"job":"node-exporter",
"namespace":"kubesphere-monitoring-system",
"pod":"node-exporter-nxpld",
"service":"node-exporter"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-main",
"instance":"10.233.99.18:8443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-metrics",
"instance":"192.168.2.2:10250",
"job":"kubelet",
"namespace":"kube-system",
"node":"ks-allinone",
"service":"kubelet"
},
"value":[
1585743854.077,
1.123456
]
},
{
"metric":{
"__name__":"up",
"endpoint":"https-self",
"instance":"10.233.99.18:9443",
"job":"kube-state-metrics",
"namespace":"kubesphere-monitoring-system",
"pod":"kube-state-metrics-566cdbcb48-98brh",
"service":"kube-state-metrics"
},
"value":[
1585743854.077,
1.123456
]
}
]
}
}
]

View File

@@ -0,0 +1,23 @@
package testdata
// PromQLs maps a named KubeSphere metric (eg. "node_cpu_utilisation") to the
// exact PromQL statement the expression builder is expected to emit for it.
// It serves as the golden reference for the monitoring unit tests.
var PromQLs = map[string]string{
	"cluster_cpu_utilisation":              `:node_cpu_utilisation:avg1m`,
	"node_cpu_utilisation":                 `node:node_cpu_utilisation:avg1m{node="i-2dazc1d6"}`,
	"node_cpu_total":                       `node:node_num_cpu:sum{node=~"i-2dazc1d6|i-ezjb7gsk"}`,
	"workspace_cpu_usage":                  `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", label_kubesphere_io_workspace="system-workspace"}), 0.001)`,
	"workspace_memory_usage":               `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", label_kubesphere_io_workspace=~"system-workspace|demo", label_kubesphere_io_workspace!=""})`,
	"namespace_cpu_usage":                  `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="kube-system"}, 0.001)`,
	"namespace_memory_usage":               `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"kube-system|default"}`,
	"namespace_memory_usage_wo_cache":      `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", label_kubesphere_io_workspace="system-workspace", namespace=~"kube-system|default"}`,
	"workload_cpu_usage":                   `round(namespace:workload_cpu_usage:sum{namespace="default", workload=~"Deployment:apiserver|coredns"}, 0.001)`,
	"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
	"pod_cpu_usage":                        `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^deployment-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`,
	"pod_memory_usage":                     `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`,
	"pod_memory_usage_wo_cache":            `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`,
	"container_cpu_usage":                  `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name="syscall"}[5m])), 0.001)`,
	"container_memory_usage":               `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name=~"syscall"})`,
	"pvc_inodes_available":                 `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim="db-123"}`,
	"pvc_inodes_used":                      `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim=~"db-123"}`,
	"pvc_inodes_total":                     `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{storageclass="default", persistentvolumeclaim=~"db-123"}`,
	"etcd_server_list":                     `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`,
}

View File

@@ -1,14 +1,26 @@
package monitoring
type Level int
const (
LevelCluster = 1 << iota
LevelNode
LevelWorkspace
LevelNamespace
LevelWorkload
LevelPod
LevelContainer
LevelPVC
LevelComponent
)
type QueryOption interface {
Apply(*QueryOptions)
}
type QueryOptions struct {
Level MonitoringLevel
NamedMetrics []string
Level Level
MetricFilter string
ResourceFilter string
NodeName string
WorkspaceName string
@@ -25,44 +37,35 @@ func NewQueryOptions() *QueryOptions {
return &QueryOptions{}
}
type ClusterOption struct {
MetricFilter string
}
type ClusterOption struct{}
func (co ClusterOption) Apply(o *QueryOptions) {
func (_ ClusterOption) Apply(o *QueryOptions) {
o.Level = LevelCluster
o.NamedMetrics = ClusterMetrics
}
type NodeOption struct {
MetricFilter string
ResourceFilter string
NodeName string
}
func (no NodeOption) Apply(o *QueryOptions) {
o.Level = LevelNode
o.NamedMetrics = NodeMetrics
o.ResourceFilter = no.ResourceFilter
o.NodeName = no.NodeName
}
type WorkspaceOption struct {
MetricFilter string
ResourceFilter string
WorkspaceName string
}
func (wo WorkspaceOption) Apply(o *QueryOptions) {
o.Level = LevelWorkspace
o.NamedMetrics = WorkspaceMetrics
o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.WorkspaceName = wo.WorkspaceName
}
type NamespaceOption struct {
MetricFilter string
ResourceFilter string
WorkspaceName string
NamespaceName string
@@ -70,33 +73,25 @@ type NamespaceOption struct {
func (no NamespaceOption) Apply(o *QueryOptions) {
o.Level = LevelNamespace
o.NamedMetrics = NamespaceMetrics
o.MetricFilter = no.MetricFilter
o.ResourceFilter = no.ResourceFilter
o.WorkspaceName = no.WorkspaceName
o.NamespaceName = no.NamespaceName
}
type WorkloadOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
WorkloadKind string
WorkloadName string
}
func (wo WorkloadOption) Apply(o *QueryOptions) {
o.Level = LevelWorkload
o.NamedMetrics = WorkspaceMetrics
o.MetricFilter = wo.MetricFilter
o.ResourceFilter = wo.ResourceFilter
o.NamespaceName = wo.NamespaceName
o.WorkloadKind = wo.WorkloadKind
o.WorkloadName = wo.WorkloadName
}
type PodOption struct {
MetricFilter string
ResourceFilter string
NodeName string
NamespaceName string
@@ -107,8 +102,6 @@ type PodOption struct {
func (po PodOption) Apply(o *QueryOptions) {
o.Level = LevelPod
o.NamedMetrics = PodMetrics
o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.WorkloadKind = po.WorkloadKind
@@ -116,7 +109,6 @@ func (po PodOption) Apply(o *QueryOptions) {
}
type ContainerOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
PodName string
@@ -125,8 +117,6 @@ type ContainerOption struct {
func (co ContainerOption) Apply(o *QueryOptions) {
o.Level = LevelContainer
o.NamedMetrics = ContainerMetrics
o.MetricFilter = co.MetricFilter
o.ResourceFilter = co.ResourceFilter
o.NamespaceName = co.NamespaceName
o.PodName = co.PodName
@@ -134,7 +124,6 @@ func (co ContainerOption) Apply(o *QueryOptions) {
}
type PVCOption struct {
MetricFilter string
ResourceFilter string
NamespaceName string
StorageClassName string
@@ -143,20 +132,14 @@ type PVCOption struct {
func (po PVCOption) Apply(o *QueryOptions) {
o.Level = LevelPVC
o.NamedMetrics = PVCMetrics
o.MetricFilter = po.MetricFilter
o.ResourceFilter = po.ResourceFilter
o.NamespaceName = po.NamespaceName
o.StorageClassName = po.StorageClassName
o.PersistentVolumeClaimName = po.PersistentVolumeClaimName
}
type ComponentOption struct {
MetricFilter string
}
type ComponentOption struct{}
func (co ComponentOption) Apply(o *QueryOptions) {
func (_ ComponentOption) Apply(o *QueryOptions) {
o.Level = LevelComponent
o.NamedMetrics = ComponentMetrics
o.MetricFilter = co.MetricFilter
}

View File

@@ -0,0 +1,33 @@
package monitoring
// Metric result types, mirroring the Prometheus HTTP API "resultType" field.
const (
	MetricTypeMatrix = "matrix" // range-query result: a series of points per metric
	MetricTypeVector = "vector" // instant-query result: a single sample per metric
)
// Metric is the result of a single named metric query: the metric name,
// the returned data, and an error message when the query failed.
type Metric struct {
	// MetricName is the well-known metric identifier, eg. scheduler_up_sum.
	MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
	// MetricData is embedded with a tag so its contents marshal under the "data" key.
	MetricData `json:"data,omitempty" description:"actual metric result"`
	// Error carries the backend error message, if any.
	Error string `json:"error,omitempty"`
}
// MetricData is the payload of a metric query result: the result type
// plus the matching set of time series.
type MetricData struct {
	// MetricType is one of MetricTypeMatrix or MetricTypeVector.
	MetricType string `json:"resultType,omitempty" description:"result type, one of matrix, vector"`
	// MetricValues holds one entry per returned time series.
	MetricValues []MetricValue `json:"result,omitempty" description:"metric data including labels, time series and values"`
}
// Point is one sample of a time series: element 0 holds the Unix
// timestamp (seconds) and element 1 holds the sampled value.
type Point [2]float64

// Timestamp returns the time coordinate of the sample.
func (p Point) Timestamp() float64 {
	return p[0]
}

// Value returns the sampled value of the point.
func (p Point) Value() float64 {
	return p[1]
}

// MetricValue is a single time series within a query result. Sample is
// populated for vector (instant) results, Series for matrix (range) results,
// as indicated by the "value"/"values" JSON keys.
type MetricValue struct {
	Metadata map[string]string `json:"metric,omitempty" description:"time series labels"`
	Sample   Point             `json:"value,omitempty" description:"time series, values of vector type"`
	Series   []Point           `json:"values,omitempty" description:"time series, values of matrix type"`
}