Merge pull request #756 from huanggze/monitor-2.1-a

refactor monitor module
This commit is contained in:
KubeSphere CI Bot
2019-09-19 13:44:53 +08:00
committed by GitHub
13 changed files with 1711 additions and 2875 deletions

View File

@@ -0,0 +1,41 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
const (
// Monitoring levels: the resource granularity at which metrics are
// queried and aggregated, from cluster-wide down to single containers
// and PVCs. The string values appear in API paths and metric names.
MonitorLevelCluster = "cluster"
MonitorLevelNode = "node"
MonitorLevelWorkspace = "workspace"
MonitorLevelNamespace = "namespace"
MonitorLevelPod = "pod"
MonitorLevelContainer = "container"
MonitorLevelPVC = "pvc"
MonitorLevelWorkload = "workload"
MonitorLevelComponent = "component"
// ChannelMaxCapacity bounds a buffered channel used when collecting
// metric results. NOTE(review): purpose inferred from the name —
// confirm against the consumer of this constant.
ChannelMaxCapacity = 100
// prometheus query type
// RangeQuery and Query are the Prometheus HTTP API endpoint names for
// range queries and instant queries, respectively.
RangeQuery = "query_range"
Query = "query"
// DefaultQueryStep is the default resolution step used for range queries.
DefaultQueryStep = "10m"
// Kubernetes workload kinds, spelled as they appear in object metadata.
StatefulSet = "StatefulSet"
DaemonSet = "DaemonSet"
Deployment = "Deployment"
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,502 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
const (
// TODO: expose the following metrics in prometheus format
// Names of resource-count metrics (workspaces, accounts, namespaces,
// DevOps projects, members, roles) that — per the TODO above — are
// apparently not yet served in Prometheus exposition format.
MetricClusterWorkspaceCount = "cluster_workspace_count"
MetricClusterAccountCount = "cluster_account_count"
MetricClusterNamespaceCount = "cluster_namespace_count"
MetricClusterDevopsCount = "cluster_devops_project_count"
MetricWorkspaceNamespaceCount = "workspace_namespace_count"
MetricWorkspaceDevopsCount = "workspace_devops_project_count"
MetricWorkspaceMemberCount = "workspace_member_count"
MetricWorkspaceRoleCount = "workspace_role_count"
)
// clusterMetrics lists every metric name supported at the cluster
// monitoring level; metricsPromqlMap maps each name to its PromQL
// expression. The slice order is preserved as declared.
var clusterMetrics = []string{
"cluster_cpu_utilisation",
"cluster_cpu_usage",
"cluster_cpu_total",
"cluster_memory_utilisation",
"cluster_memory_available",
"cluster_memory_total",
"cluster_memory_usage_wo_cache",
"cluster_net_utilisation",
"cluster_net_bytes_transmitted",
"cluster_net_bytes_received",
"cluster_disk_read_iops",
"cluster_disk_write_iops",
"cluster_disk_read_throughput",
"cluster_disk_write_throughput",
"cluster_disk_size_usage",
"cluster_disk_size_utilisation",
"cluster_disk_size_capacity",
"cluster_disk_size_available",
"cluster_disk_inode_total",
"cluster_disk_inode_usage",
"cluster_disk_inode_utilisation",
"cluster_namespace_count",
"cluster_pod_count",
"cluster_pod_quota",
"cluster_pod_utilisation",
"cluster_pod_running_count",
"cluster_pod_succeeded_count",
"cluster_pod_abnormal_count",
"cluster_node_online",
"cluster_node_offline",
"cluster_node_total",
"cluster_cronjob_count",
"cluster_pvc_count",
"cluster_daemonset_count",
"cluster_deployment_count",
"cluster_endpoint_count",
"cluster_hpa_count",
"cluster_job_count",
"cluster_statefulset_count",
"cluster_replicaset_count",
"cluster_service_count",
"cluster_secret_count",
"cluster_pv_count",
"cluster_ingresses_extensions_count",
"cluster_load1",
"cluster_load5",
"cluster_load15",
"cluster_pod_abnormal_ratio",
"cluster_node_offline_ratio",
}
// nodeMetrics lists the metric names supported at the node monitoring
// level (CPU, memory, network, disk, pods, load averages); see
// metricsPromqlMap for the PromQL behind each name.
var nodeMetrics = []string{
"node_cpu_utilisation",
"node_cpu_total",
"node_cpu_usage",
"node_memory_utilisation",
"node_memory_usage_wo_cache",
"node_memory_available",
"node_memory_total",
"node_net_utilisation",
"node_net_bytes_transmitted",
"node_net_bytes_received",
"node_disk_read_iops",
"node_disk_write_iops",
"node_disk_read_throughput",
"node_disk_write_throughput",
"node_disk_size_capacity",
"node_disk_size_available",
"node_disk_size_usage",
"node_disk_size_utilisation",
"node_disk_inode_total",
"node_disk_inode_usage",
"node_disk_inode_utilisation",
"node_pod_count",
"node_pod_quota",
"node_pod_utilisation",
"node_pod_running_count",
"node_pod_succeeded_count",
"node_pod_abnormal_count",
"node_load1",
"node_load5",
"node_load15",
"node_pod_abnormal_ratio",
}
// workspaceMetrics lists the metric names supported at the workspace
// monitoring level — resource usage plus per-workspace object counts.
var workspaceMetrics = []string{
"workspace_cpu_usage",
"workspace_memory_usage",
"workspace_memory_usage_wo_cache",
"workspace_net_bytes_transmitted",
"workspace_net_bytes_received",
"workspace_pod_count",
"workspace_pod_running_count",
"workspace_pod_succeeded_count",
"workspace_pod_abnormal_count",
"workspace_ingresses_extensions_count",
"workspace_cronjob_count",
"workspace_pvc_count",
"workspace_daemonset_count",
"workspace_deployment_count",
"workspace_endpoint_count",
"workspace_hpa_count",
"workspace_job_count",
"workspace_statefulset_count",
"workspace_replicaset_count",
"workspace_service_count",
"workspace_secret_count",
"workspace_pod_abnormal_ratio",
}
// namespaceMetrics lists the metric names supported at the namespace
// monitoring level, including resource-quota ("*_hard") metrics and
// per-namespace object counts.
var namespaceMetrics = []string{
"namespace_cpu_usage",
"namespace_memory_usage",
"namespace_memory_usage_wo_cache",
"namespace_net_bytes_transmitted",
"namespace_net_bytes_received",
"namespace_pod_count",
"namespace_pod_running_count",
"namespace_pod_succeeded_count",
"namespace_pod_abnormal_count",
"namespace_pod_abnormal_ratio",
"namespace_memory_limit_hard",
"namespace_cpu_limit_hard",
"namespace_pod_count_hard",
"namespace_cronjob_count",
"namespace_pvc_count",
"namespace_daemonset_count",
"namespace_deployment_count",
"namespace_endpoint_count",
"namespace_hpa_count",
"namespace_job_count",
"namespace_statefulset_count",
"namespace_replicaset_count",
"namespace_service_count",
"namespace_secret_count",
"namespace_ingresses_extensions_count",
"namespace_s2ibuilder_count",
}
// workloadMetrics lists the metric names supported at the workload
// (Deployment / StatefulSet / DaemonSet) monitoring level.
var workloadMetrics = []string{
// TODO: the following four metrics are deprecated.
"workload_pod_cpu_usage",
"workload_pod_memory_usage_wo_cache",
"workload_pod_net_bytes_transmitted",
"workload_pod_net_bytes_received",
// Preferred replacements for the deprecated "workload_pod_*" names.
"workload_cpu_usage",
"workload_memory_usage_wo_cache",
"workload_net_bytes_transmitted",
"workload_net_bytes_received",
// Desired vs. available replica counts per workload kind.
"workload_deployment_replica",
"workload_deployment_replica_available",
"workload_statefulset_replica",
"workload_statefulset_replica_available",
"workload_daemonset_replica",
"workload_daemonset_replica_available",
"workload_deployment_unavailable_replicas_ratio",
"workload_daemonset_unavailable_replicas_ratio",
"workload_statefulset_unavailable_replicas_ratio",
}
// podMetrics enumerates the metric names available at the pod
// monitoring level: CPU, memory (with and without page cache), and
// network traffic in each direction.
var podMetrics = []string{
	`pod_cpu_usage`,
	`pod_memory_usage`,
	`pod_memory_usage_wo_cache`,
	`pod_net_bytes_transmitted`,
	`pod_net_bytes_received`,
}
// containerMetrics enumerates the metric names available at the
// container monitoring level: CPU usage and memory usage with and
// without page cache.
var containerMetrics = []string{
	`container_cpu_usage`,
	`container_memory_usage`,
	`container_memory_usage_wo_cache`,
}
// pvcMetrics enumerates the metric names available at the PVC
// (PersistentVolumeClaim) monitoring level: inode and byte capacity,
// usage, availability, and utilisation ratios.
var pvcMetrics = []string{
	`pvc_inodes_available`,
	`pvc_inodes_used`,
	`pvc_inodes_total`,
	`pvc_inodes_utilisation`,
	`pvc_bytes_available`,
	`pvc_bytes_used`,
	`pvc_bytes_total`,
	`pvc_bytes_utilisation`,
}
// componentMetrics lists the metric names supported at the component
// monitoring level, covering control-plane and system components:
// etcd, kube-apiserver, kube-scheduler, kube-controller-manager,
// CoreDNS, and Prometheus itself.
var componentMetrics = []string{
// etcd
"etcd_server_list",
"etcd_server_total",
"etcd_server_up_total",
"etcd_server_has_leader",
"etcd_server_leader_changes",
"etcd_server_proposals_failed_rate",
"etcd_server_proposals_applied_rate",
"etcd_server_proposals_committed_rate",
"etcd_server_proposals_pending_count",
"etcd_mvcc_db_size",
"etcd_network_client_grpc_received_bytes",
"etcd_network_client_grpc_sent_bytes",
"etcd_grpc_call_rate",
"etcd_grpc_call_failed_rate",
"etcd_grpc_server_msg_received_rate",
"etcd_grpc_server_msg_sent_rate",
"etcd_disk_wal_fsync_duration",
"etcd_disk_wal_fsync_duration_quantile",
"etcd_disk_backend_commit_duration",
"etcd_disk_backend_commit_duration_quantile",
// kube-apiserver
"apiserver_up_sum",
"apiserver_request_rate",
"apiserver_request_by_verb_rate",
"apiserver_request_latencies",
"apiserver_request_by_verb_latencies",
// kube-scheduler
"scheduler_up_sum",
"scheduler_schedule_attempts",
"scheduler_schedule_attempt_rate",
"scheduler_e2e_scheduling_latency",
"scheduler_e2e_scheduling_latency_quantile",
// kube-controller-manager
"controller_manager_up_sum",
// CoreDNS
"coredns_up_sum",
"coredns_cache_hits",
"coredns_cache_misses",
"coredns_dns_request_rate",
"coredns_dns_request_duration",
"coredns_dns_request_duration_quantile",
"coredns_dns_request_by_type_rate",
"coredns_dns_request_by_rcode_rate",
"coredns_panic_rate",
"coredns_proxy_request_rate",
"coredns_proxy_request_duration",
"coredns_proxy_request_duration_quantile",
// Prometheus
"prometheus_up_sum",
"prometheus_tsdb_head_samples_appended_rate",
}
var metricsPromqlMap = map[string]string{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
"cluster_cpu_usage": `round(:node_cpu_utilisation:avg1m * sum(node:node_num_cpu:sum), 0.001)`,
"cluster_cpu_total": "sum(node:node_num_cpu:sum)",
"cluster_memory_utilisation": ":node_memory_utilisation:",
"cluster_memory_available": "sum(node:node_memory_bytes_available:sum)",
"cluster_memory_total": "sum(node:node_memory_bytes_total:sum)",
"cluster_memory_usage_wo_cache": "sum(node:node_memory_bytes_total:sum) - sum(node:node_memory_bytes_available:sum)",
"cluster_net_utilisation": ":node_net_utilisation:sum_irate",
"cluster_net_bytes_transmitted": "sum(node:node_net_bytes_transmitted:sum_irate)",
"cluster_net_bytes_received": "sum(node:node_net_bytes_received:sum_irate)",
"cluster_disk_read_iops": "sum(node:data_volume_iops_reads:sum)",
"cluster_disk_write_iops": "sum(node:data_volume_iops_writes:sum)",
"cluster_disk_read_throughput": "sum(node:data_volume_throughput_bytes_read:sum)",
"cluster_disk_write_throughput": "sum(node:data_volume_throughput_bytes_written:sum)",
"cluster_disk_size_usage": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} - node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_size_utilisation": `cluster:disk_utilization:ratio`,
"cluster_disk_size_capacity": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_size_available": `sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_inode_total": `sum(node:node_inodes_total:)`,
"cluster_disk_inode_usage": `sum(node:node_inodes_total:) - sum(node:node_inodes_free:)`,
"cluster_disk_inode_utilisation": `cluster:disk_inode_utilization:ratio`,
"cluster_namespace_count": `count(kube_namespace_annotations)`,
"cluster_pod_count": `cluster:pod:sum`,
"cluster_pod_quota": `sum(max(kube_node_status_capacity_pods) by (node) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0))`,
"cluster_pod_utilisation": `cluster:pod_utilization:ratio`,
"cluster_pod_running_count": `cluster:pod_running:count`,
"cluster_pod_succeeded_count": `count(kube_pod_info unless on (pod) (kube_pod_status_phase{phase=~"Failed|Pending|Unknown|Running"} > 0) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0))`,
"cluster_pod_abnormal_count": `cluster:pod_abnormal:sum`,
"cluster_node_online": `sum(kube_node_status_condition{condition="Ready",status="true"})`,
"cluster_node_offline": `cluster:node_offline:sum`,
"cluster_node_total": `sum(kube_node_status_condition{condition="Ready"})`,
"cluster_cronjob_count": `sum(kube_cronjob_labels)`,
"cluster_pvc_count": `sum(kube_persistentvolumeclaim_info)`,
"cluster_daemonset_count": `sum(kube_daemonset_labels)`,
"cluster_deployment_count": `sum(kube_deployment_labels)`,
"cluster_endpoint_count": `sum(kube_endpoint_labels)`,
"cluster_hpa_count": `sum(kube_hpa_labels)`,
"cluster_job_count": `sum(kube_job_labels)`,
"cluster_statefulset_count": `sum(kube_statefulset_labels)`,
"cluster_replicaset_count": `count(kube_replicaset_created)`,
"cluster_service_count": `sum(kube_service_info)`,
"cluster_secret_count": `sum(kube_secret_info)`,
"cluster_pv_count": `sum(kube_persistentvolume_labels)`,
"cluster_ingresses_extensions_count": `sum(kube_ingress_labels)`,
"cluster_load1": `sum(node_load1{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
"cluster_load5": `sum(node_load5{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
"cluster_load15": `sum(node_load15{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
"cluster_pod_abnormal_ratio": `cluster:pod_abnormal:ratio`,
"cluster_node_offline_ratio": `cluster:node_offline:ratio`,
//node
"node_cpu_utilisation": "node:node_cpu_utilisation:avg1m{$1}",
"node_cpu_total": "node:node_num_cpu:sum{$1}",
"node_memory_utilisation": "node:node_memory_utilisation:{$1}",
"node_memory_available": "node:node_memory_bytes_available:sum{$1}",
"node_memory_total": "node:node_memory_bytes_total:sum{$1}",
"node_memory_usage_wo_cache": "node:node_memory_bytes_total:sum{$1} - node:node_memory_bytes_available:sum{$1}",
"node_net_utilisation": "node:node_net_utilisation:sum_irate{$1}",
"node_net_bytes_transmitted": "node:node_net_bytes_transmitted:sum_irate{$1}",
"node_net_bytes_received": "node:node_net_bytes_received:sum_irate{$1}",
"node_disk_read_iops": "node:data_volume_iops_reads:sum{$1}",
"node_disk_write_iops": "node:data_volume_iops_writes:sum{$1}",
"node_disk_read_throughput": "node:data_volume_throughput_bytes_read:sum{$1}",
"node_disk_write_throughput": "node:data_volume_throughput_bytes_written:sum{$1}",
"node_disk_size_capacity": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{$1}) by (device, node)) by (node)`,
"node_disk_size_available": `node:disk_space_available:{$1}`,
"node_disk_size_usage": `sum(max((node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} - node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{$1}) by (device, node)) by (node)`,
"node_disk_size_utilisation": `node:disk_space_utilization:ratio{$1}`,
"node_disk_inode_total": `node:node_inodes_total:{$1}`,
"node_disk_inode_usage": `node:node_inodes_total:{$1} - node:node_inodes_free:{$1}`,
"node_disk_inode_utilisation": `node:disk_inode_utilization:ratio{$1}`,
"node_pod_count": `node:pod_count:sum{$1}`,
"node_pod_quota": `max(kube_node_status_capacity_pods{$1}) by (node) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0)`,
"node_pod_utilisation": `node:pod_utilization:ratio{$1}`,
"node_pod_running_count": `node:pod_running:count{$1}`,
"node_pod_succeeded_count": `node:pod_succeeded:count{$1}`,
"node_pod_abnormal_count": `node:pod_abnormal:count{$1}`,
"node_cpu_usage": `round(node:node_cpu_utilisation:avg1m{$1} * node:node_num_cpu:sum{$1}, 0.001)`,
"node_load1": `node:load1:ratio{$1}`,
"node_load5": `node:load5:ratio{$1}`,
"node_load15": `node:load15:ratio{$1}`,
"node_pod_abnormal_ratio": `node:pod_abnormal:ratio{$1}`,
// workspace
"workspace_cpu_usage": `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", $1}), 0.001)`,
"workspace_memory_usage": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", $1})`,
"workspace_memory_usage_wo_cache": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", $1})`,
"workspace_net_bytes_transmitted": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"workspace_net_bytes_received": `sum by (label_kubesphere_io_workspace) (sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{})`,
"workspace_pod_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_pod_running_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase="Running", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_pod_succeeded_count": `sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase="Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_pod_abnormal_count": `count by (label_kubesphere_io_workspace) ((kube_pod_info{node!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0)) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_ingresses_extensions_count": `sum by (label_kubesphere_io_workspace) (kube_ingress_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_cronjob_count": `sum by (label_kubesphere_io_workspace) (kube_cronjob_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_pvc_count": `sum by (label_kubesphere_io_workspace) (kube_persistentvolumeclaim_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_daemonset_count": `sum by (label_kubesphere_io_workspace) (kube_daemonset_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_deployment_count": `sum by (label_kubesphere_io_workspace) (kube_deployment_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_endpoint_count": `sum by (label_kubesphere_io_workspace) (kube_endpoint_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_hpa_count": `sum by (label_kubesphere_io_workspace) (kube_hpa_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_job_count": `sum by (label_kubesphere_io_workspace) (kube_job_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_statefulset_count": `sum by (label_kubesphere_io_workspace) (kube_statefulset_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_replicaset_count": `count by (label_kubesphere_io_workspace) (kube_replicaset_created{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_service_count": `sum by (label_kubesphere_io_workspace) (kube_service_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_secret_count": `sum by (label_kubesphere_io_workspace) (kube_secret_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
"workspace_pod_abnormal_ratio": `count by (label_kubesphere_io_workspace) ((kube_pod_info{node!=""} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0)) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1}) / sum by (label_kubesphere_io_workspace) (kube_pod_status_phase{phase!="Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{$1}))`,
//namespace
"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", $1}, 0.001)`,
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", $1}`,
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", $1}`,
"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_net_bytes_received": `sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m]) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pod_count": `sum by (namespace) (kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pod_running_count": `sum by (namespace) (kube_pod_status_phase{phase="Running", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pod_succeeded_count": `sum by (namespace) (kube_pod_status_phase{phase="Succeeded", namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pod_abnormal_count": `namespace:pod_abnormal:count{namespace!="", $1}`,
"namespace_pod_abnormal_ratio": `namespace:pod_abnormal:ratio{namespace!="", $1}`,
"namespace_memory_limit_hard": `min by (namespace) (kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", resource="limits.memory"} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_cpu_limit_hard": `min by (namespace) (kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", resource="limits.cpu"} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pod_count_hard": `min by (namespace) (kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", resource="count/pods"} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_cronjob_count": `sum by (namespace) (kube_cronjob_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_pvc_count": `sum by (namespace) (kube_persistentvolumeclaim_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_daemonset_count": `sum by (namespace) (kube_daemonset_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_deployment_count": `sum by (namespace) (kube_deployment_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_endpoint_count": `sum by (namespace) (kube_endpoint_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_hpa_count": `sum by (namespace) (kube_hpa_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_job_count": `sum by (namespace) (kube_job_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_statefulset_count": `sum by (namespace) (kube_statefulset_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_replicaset_count": `count by (namespace) (kube_replicaset_created{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_service_count": `sum by (namespace) (kube_service_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_secret_count": `sum by (namespace) (kube_secret_info{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_ingresses_extensions_count": `sum by (namespace) (kube_ingress_labels{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
"namespace_s2ibuilder_count": `sum by (namespace) (s2i_s2ibuilder_created{namespace!=""} * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels{$1})`,
// workload
// TODO: the following four metrics are deprecated.
"workload_pod_cpu_usage": `round(namespace:workload_cpu_usage:sum{$1}, 0.001)`,
"workload_pod_memory_usage_wo_cache": `namespace:workload_memory_usage_wo_cache:sum{$1}`,
"workload_pod_net_bytes_transmitted": `namespace:workload_net_bytes_transmitted:sum_irate{$1}`,
"workload_pod_net_bytes_received": `namespace:workload_net_bytes_received:sum_irate{$1}`,
"workload_cpu_usage": `round(namespace:workload_cpu_usage:sum{$1}, 0.001)`,
"workload_memory_usage_wo_cache": `namespace:workload_memory_usage_wo_cache:sum{$1}`,
"workload_net_bytes_transmitted": `namespace:workload_net_bytes_transmitted:sum_irate{$1}`,
"workload_net_bytes_received": `namespace:workload_net_bytes_received:sum_irate{$1}`,
"workload_deployment_replica": `label_join(sum (label_join(label_replace(kube_deployment_spec_replicas{$2}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{$2}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_statefulset_replica": `label_join(sum (label_join(label_replace(kube_statefulset_replicas{$2}, "owner_kind", "StatefulSet", "", ""), "workload", "", "statefulset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_statefulset_replica_available": `label_join(sum (label_join(label_replace(kube_statefulset_status_replicas_current{$2}, "owner_kind", "StatefulSet", "", ""), "workload", "", "statefulset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_daemonset_replica": `label_join(sum (label_join(label_replace(kube_daemonset_status_desired_number_scheduled{$2}, "owner_kind", "DaemonSet", "", ""), "workload", "", "daemonset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_daemonset_replica_available": `label_join(sum (label_join(label_replace(kube_daemonset_status_number_available{$2}, "owner_kind", "DaemonSet", "", ""), "workload", "", "daemonset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_deployment_unavailable_replicas_ratio": `namespace:deployment_unavailable_replicas:ratio{$1}`,
"workload_daemonset_unavailable_replicas_ratio": `namespace:daemonset_unavailable_replicas:ratio{$1}`,
"workload_statefulset_unavailable_replicas_ratio": `namespace:statefulset_unavailable_replicas:ratio{$1}`,
// pod
"pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}, 0.001)`,
"pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
"pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""} - container_memory_cache{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
"pod_net_bytes_transmitted": `label_join(sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
"pod_net_bytes_received": `label_join(sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{pod_name!="", interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)", job="kubelet"}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{$1} * on (namespace, pod) group_left(node) kube_pod_info{$2}`,
// container
"container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", $1}[5m])), 0.001)`,
"container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", $1})`,
"container_memory_usage_wo_cache": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", $1} - container_memory_cache{job="kubelet", container_name!="POD", container_name!="", image!="", $1})`,
// pvc
"pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_inodes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_inodes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_inodes_utilisation": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used / kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_bytes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_available_bytes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_bytes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_bytes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
"pvc_bytes_utilisation": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_used_bytes / kubelet_volume_stats_capacity_bytes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{$1}`,
// component
"etcd_server_list": `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`,
"etcd_server_total": `count(up{job="etcd"})`,
"etcd_server_up_total": `etcd:up:sum`,
"etcd_server_has_leader": `label_replace(etcd_server_has_leader, "node_ip", "$1", "instance", "(.*):.*")`,
"etcd_server_leader_changes": `label_replace(etcd:etcd_server_leader_changes_seen:sum_changes, "node_ip", "$1", "node", "(.*)")`,
"etcd_server_proposals_failed_rate": `avg(etcd:etcd_server_proposals_failed:sum_irate)`,
"etcd_server_proposals_applied_rate": `avg(etcd:etcd_server_proposals_applied:sum_irate)`,
"etcd_server_proposals_committed_rate": `avg(etcd:etcd_server_proposals_committed:sum_irate)`,
"etcd_server_proposals_pending_count": `avg(etcd:etcd_server_proposals_pending:sum)`,
"etcd_mvcc_db_size": `avg(etcd:etcd_debugging_mvcc_db_total_size:sum)`,
"etcd_network_client_grpc_received_bytes": `sum(etcd:etcd_network_client_grpc_received_bytes:sum_irate)`,
"etcd_network_client_grpc_sent_bytes": `sum(etcd:etcd_network_client_grpc_sent_bytes:sum_irate)`,
"etcd_grpc_call_rate": `sum(etcd:grpc_server_started:sum_irate)`,
"etcd_grpc_call_failed_rate": `sum(etcd:grpc_server_handled:sum_irate)`,
"etcd_grpc_server_msg_received_rate": `sum(etcd:grpc_server_msg_received:sum_irate)`,
"etcd_grpc_server_msg_sent_rate": `sum(etcd:grpc_server_msg_sent:sum_irate)`,
"etcd_disk_wal_fsync_duration": `avg(etcd:etcd_disk_wal_fsync_duration:avg)`,
"etcd_disk_wal_fsync_duration_quantile": `avg(etcd:etcd_disk_wal_fsync_duration:histogram_quantile) by (quantile)`,
"etcd_disk_backend_commit_duration": `avg(etcd:etcd_disk_backend_commit_duration:avg)`,
"etcd_disk_backend_commit_duration_quantile": `avg(etcd:etcd_disk_backend_commit_duration:histogram_quantile) by (quantile)`,
"apiserver_up_sum": `apiserver:up:sum`,
"apiserver_request_rate": `apiserver:apiserver_request_count:sum_irate`,
"apiserver_request_by_verb_rate": `apiserver:apiserver_request_count:sum_verb_irate`,
"apiserver_request_latencies": `apiserver:apiserver_request_latencies:avg`,
"apiserver_request_by_verb_latencies": `apiserver:apiserver_request_latencies:avg_by_verb`,
"scheduler_up_sum": `scheduler:up:sum`,
"scheduler_schedule_attempts": `scheduler:scheduler_schedule_attempts:sum`,
"scheduler_schedule_attempt_rate": `scheduler:scheduler_schedule_attempts:sum_rate`,
"scheduler_e2e_scheduling_latency": `scheduler:scheduler_e2e_scheduling_latency:avg`,
"scheduler_e2e_scheduling_latency_quantile": `scheduler:scheduler_e2e_scheduling_latency:histogram_quantile`,
"controller_manager_up_sum": `controller_manager:up:sum`,
"coredns_up_sum": `coredns:up:sum`,
"coredns_cache_hits": `coredns:coredns_cache_hits_total:sum_irate`,
"coredns_cache_misses": `coredns:coredns_cache_misses:sum_irate`,
"coredns_dns_request_rate": `coredns:coredns_dns_request_count:sum_irate`,
"coredns_dns_request_duration": `coredns:coredns_dns_request_duration:avg`,
"coredns_dns_request_duration_quantile": `coredns:coredns_dns_request_duration:histogram_quantile`,
"coredns_dns_request_by_type_rate": `coredns:coredns_dns_request_type_count:sum_irate`,
"coredns_dns_request_by_rcode_rate": `coredns:coredns_dns_response_rcode_count:sum_irate`,
"coredns_panic_rate": `coredns:coredns_panic_count:sum_irate`,
"coredns_proxy_request_rate": `coredns:coredns_proxy_request_count:sum_irate`,
"coredns_proxy_request_duration": `coredns:coredns_proxy_request_duration:avg`,
"coredns_proxy_request_duration_quantile": `coredns:coredns_proxy_request_duration:histogram_quantile`,
"prometheus_up_sum": `prometheus:up:sum`,
"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}

View File

@@ -1,284 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"strings"
)
// MakeWorkloadPromQL renders the PromQL template registered for metricName,
// substituting the namespace ($2) and a workload name filter ($3).
//
// resourcesFilter is a "|"-separated list of workload names (eg. "xxx|yyy");
// it is reshaped into an anchored RE2 alternation before substitution.
func MakeWorkloadPromQL(metricName, nsName, resourcesFilter, wkKind string) string {
	switch wkKind {
	case "deployment":
		wkKind = Deployment
	case "daemonset":
		wkKind = DaemonSet
	case "statefulset":
		wkKind = StatefulSet
	}

	if wkKind == "" {
		// No workload kind given: match any workload.
		resourcesFilter = Any
	} else if resourcesFilter == "" {
		// No explicit filter: derive one matching every workload of this kind.
		if strings.Contains(metricName, "pod") {
			// workload_pod_xxx metrics carry a "Kind:name" workload label.
			resourcesFilter = wkKind + ":" + Any
		} else if strings.Contains(metricName, strings.ToLower(wkKind)) {
			resourcesFilter = Any
		}
	} else {
		// The "workload_{deployment,statefulset,daemonset}_xxx" metrics use
		// "deployment", "statefulset" or "daemonset" label selectors which
		// match exactly a workload name,
		// eg. kube_daemonset_status_number_unavailable{daemonset=~"^xxx$"}
		// while workload_{cpu,memory,net}_xxx metrics use "workload",
		// eg. namespace:workload_cpu_usage:sum{workload="Deployment:xxx"}
		var prefix string
		if !strings.Contains(metricName, "deployment") &&
			!strings.Contains(metricName, "daemonset") &&
			!strings.Contains(metricName, "statefulset") {
			prefix = wkKind + ":"
		}

		// Reshape the "|"-separated names into RE2 syntax, anchoring each
		// one, eg. ^Deployment:xxx$.
		names := strings.Split(resourcesFilter, "|")
		for i, name := range names {
			names[i] = "^" + prefix + name + "$"
		}
		resourcesFilter = strings.Join(names, "|")
	}

	promql := RulePromQLTmplMap[metricName]
	promql = strings.Replace(promql, "$2", nsName, -1)
	promql = strings.Replace(promql, "$3", resourcesFilter, -1)
	return promql
}
func MakeSpecificWorkloadRule(wkKind, wkName, namespace string) string {
var rule = PodInfoRule
if namespace == "" {
namespace = ".*"
}
// alertnatives values: Deployment StatefulSet ReplicaSet DaemonSet
wkKind = strings.ToLower(wkKind)
switch wkKind {
case "deployment":
wkKind = ReplicaSet
if wkName != "" {
wkName = "~\"^" + wkName + `-(\\w)+$"`
} else {
wkName = "~\".*\""
}
rule = strings.Replace(rule, "$1", wkKind, -1)
rule = strings.Replace(rule, "$2", wkName, -1)
rule = strings.Replace(rule, "$3", namespace, -1)
return rule
case "replicaset":
wkKind = ReplicaSet
case "statefulset":
wkKind = StatefulSet
case "daemonset":
wkKind = DaemonSet
}
if wkName == "" {
wkName = "~\".*\""
} else {
wkName = "\"" + wkName + "\""
}
rule = strings.Replace(rule, "$1", wkKind, -1)
rule = strings.Replace(rule, "$2", wkName, -1)
rule = strings.Replace(rule, "$3", namespace, -1)
return rule
}
// MakeAllWorkspacesPromQL renders the template for metricsName across all
// workspaces, excluding any namespace matching nsFilter.
func MakeAllWorkspacesPromQL(metricsName, nsFilter string) string {
	selector := "!~\"" + nsFilter + "\""
	return strings.Replace(RulePromQLTmplMap[metricsName], "$1", selector, -1)
}
// MakeSpecificWorkspacePromQL renders the template for metricsName scoped to
// one workspace, restricting namespaces to those matching nsFilter.
func MakeSpecificWorkspacePromQL(metricsName, nsFilter string, workspace string) string {
	nsSelector := "=~\"" + nsFilter + "\""
	wsSelector := "=~\"^(" + workspace + ")$\""
	promql := strings.Replace(RulePromQLTmplMap[metricsName], "$1", nsSelector, -1)
	return strings.Replace(promql, "$2", wsSelector, -1)
}
// MakeContainerPromQL renders a container-level metric query, addressed either
// by namespace+pod (when nsName is set) or by node+pod. The $3 placeholder is
// filled with a specific container name, a caller-supplied filter, or ".*".
func MakeContainerPromQL(nsName, nodeId, podName, containerName, metricName, containerFilter string) string {
	var promql string
	if nsName != "" {
		// Container metrics looked up through namespace and pod.
		promql = strings.Replace(RulePromQLTmplMap[metricName], "$1", nsName, -1)
	} else {
		// Container metrics looked up through node and pod.
		promql = strings.Replace(RulePromQLTmplMap[metricName+"_node"], "$1", nodeId, -1)
	}
	promql = strings.Replace(promql, "$2", podName, -1)

	target := containerName
	if target == "" {
		target = containerFilter
		if target == "" {
			target = ".*"
		}
	}
	return strings.Replace(promql, "$3", target, -1)
}
// MakePodPromQL renders a pod-level metric query scoped either by namespace
// (specific pod or filtered set) or by node. Returns "" when neither a
// namespace nor a node is given.
func MakePodPromQL(metricName, nsName, nodeID, podName, podFilter string) string {
	if podFilter == "" {
		podFilter = ".*"
	}

	switch {
	case nsName != "":
		// Pod metrics scoped by namespace.
		if podName != "" {
			// A single, named pod.
			promql := strings.Replace(RulePromQLTmplMap[metricName], "$1", nsName, -1)
			return strings.Replace(promql, "$2", podName, -1)
		}
		// Every pod in the namespace matching the filter.
		promql := strings.Replace(RulePromQLTmplMap[metricName+"_all"], "$1", nsName, -1)
		return strings.Replace(promql, "$2", podFilter, -1)
	case nodeID != "":
		// Pod metrics scoped by node.
		promql := strings.Replace(RulePromQLTmplMap[metricName+"_node"], "$3", nodeID, -1)
		if podName != "" {
			return strings.Replace(promql, "$2", podName, -1)
		}
		return strings.Replace(promql, "$2", podFilter, -1)
	default:
		return ""
	}
}
// MakePVCPromQL renders a PVC-level metric query scoped by namespace (one PVC
// or a filtered set) or by storage class. Returns "" when no scope applies.
func MakePVCPromQL(metricName, nsName, pvcName, scName, pvcFilter string) string {
	if pvcFilter == "" {
		pvcFilter = ".*"
	}

	switch {
	case nsName != "" && pvcName != "":
		// A single, named PVC in the namespace.
		promql := strings.Replace(RulePromQLTmplMap[metricName], "$1", nsName, -1)
		return strings.Replace(promql, "$2", pvcName, -1)
	case nsName != "":
		// All PVCs in the namespace matching the filter.
		promql := strings.Replace(RulePromQLTmplMap[metricName+"_ns"], "$1", nsName, -1)
		return strings.Replace(promql, "$2", pvcFilter, -1)
	case scName != "":
		// All PVCs backed by the given storage class.
		return strings.Replace(RulePromQLTmplMap[metricName+"_sc"], "$1", scName, -1)
	default:
		return ""
	}
}
// MakeNamespacePromQL renders a namespace-level metric query. A concrete
// namespace name takes precedence over the filter; an empty filter means all.
func MakeNamespacePromQL(nsName string, nsFilter string, metricsName string) string {
	if nsName != "" {
		nsFilter = nsName
	} else if nsFilter == "" {
		nsFilter = ".*"
	}
	return strings.Replace(RulePromQLTmplMap[metricsName], "$1", nsFilter, -1)
}
// cluster rule
// MakeClusterRule looks up the PromQL recording rule for a cluster-level metric.
func MakeClusterRule(metricsName string) string {
	return RulePromQLTmplMap[metricsName]
}
// node rule
// MakeNodeRule renders a node-level metric query. A concrete nodeID selects
// one node exactly; otherwise nodesFilter is used as an RE2 match (".*" when
// empty, ie. every node).
func MakeNodeRule(nodeID string, nodesFilter string, metricsName string) string {
	rule := RulePromQLTmplMap[metricsName]
	if nodesFilter == "" {
		nodesFilter = ".*"
	}

	// Build the {node=...} label selector once: exact match for a specific
	// node, RE2 match for a set of nodes.
	var selector string
	if nodeID != "" {
		selector = "{" + "node" + "=" + "\"" + nodeID + "\"" + "}"
	} else {
		selector = "{" + "node" + "=~" + "\"" + nodesFilter + "\"" + "}"
	}

	if strings.Contains(metricsName, "disk_size") || strings.Contains(metricsName, "pod") ||
		strings.Contains(metricsName, "usage") || strings.Contains(metricsName, "inode") ||
		strings.Contains(metricsName, "load") {
		// These templates embed a $1 placeholder where the selector belongs.
		rule = strings.Replace(rule, "$1", selector, -1)
	} else {
		// cpu, memory, network and disk_iops recording rules take the
		// selector appended to the rule name.
		rule = rule + selector
	}
	return rule
}
// MakeComponentRule looks up the PromQL expression for a component-level metric.
func MakeComponentRule(metricsName string) string {
	return RulePromQLTmplMap[metricsName]
}

View File

@@ -1,776 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
// Prometheus query response envelope: result value types, status values, and
// the field names used when parsing query results.
const (
	ResultTypeVector             = "vector"
	ResultTypeMatrix             = "matrix"
	MetricStatus                 = "status"
	MetricStatusError            = "error"
	MetricStatusSuccess          = "success"
	ResultItemMetric             = "metric"
	ResultItemMetricResource     = "resource"
	ResultItemMetricResourceName = "resource_name"
	ResultItemMetricNodeIp       = "node_ip"
	ResultItemMetricNodeName     = "node_name"
	ResultItemValue              = "value"
	ResultItemValues             = "values"
	ResultSortTypeDesc           = "desc"
	ResultSortTypeAsc            = "asc"
)

// Names of metrics that are counted outside Prometheus (eg. from the
// Kubernetes or KubeSphere APIs).
const (
	MetricNameWorkloadCount                 = "workload_count"
	MetricNameNamespacePodCount             = "namespace_pod_count"
	MetricNameWorkspaceAllOrganizationCount = "workspace_all_organization_count"
	MetricNameWorkspaceAllAccountCount      = "workspace_all_account_count"
	MetricNameWorkspaceAllProjectCount      = "workspace_all_project_count"
	MetricNameWorkspaceAllDevopsCount       = "workspace_all_devops_project_count"
	MetricNameClusterAllProjectCount        = "cluster_namespace_count"
	MetricNameWorkspaceNamespaceCount       = "workspace_namespace_count"
	MetricNameWorkspaceDevopsCount          = "workspace_devops_project_count"
	MetricNameWorkspaceMemberCount          = "workspace_member_count"
	MetricNameWorkspaceRoleCount            = "workspace_role_count"
	MetricNameComponentOnLine               = "component_online_count"
	MetricNameComponentLine                 = "component_count"
)

// Kinds of resources counted within a workspace.
const (
	WorkspaceResourceKindOrganization = "organization"
	WorkspaceResourceKindAccount      = "account"
	WorkspaceResourceKindNamespace    = "namespace"
	WorkspaceResourceKindDevops       = "devops"
	WorkspaceResourceKindMember       = "member"
	WorkspaceResourceKindRole         = "role"
)

// Label names identifying the level a metric is aggregated at.
const (
	MetricLevelCluster          = "cluster"
	MetricLevelClusterWorkspace = "cluster_workspace"
	MetricLevelNode             = "node"
	MetricLevelWorkspace        = "workspace"
	MetricLevelNamespace        = "namespace"
	MetricLevelPod              = "pod"
	MetricLevelPodName          = "pod_name"
	MetricLevelContainer        = "container"
	MetricLevelContainerName    = "container_name"
	MetricLevelPVC              = "persistentvolumeclaim"
	MetricLevelWorkload         = "workload"
	MetricLevelComponent        = "component"
)

// Workload kinds as spelled in kube-state-metrics labels; Any is an RE2
// pattern matching every name.
const (
	ReplicaSet  = "ReplicaSet"
	StatefulSet = "StatefulSet"
	DaemonSet   = "DaemonSet"
	Deployment  = "Deployment"
	Any         = ".*"
)

// Raw PromQL fragments shared by the rule builders; $1/$2/$3 are placeholders
// substituted at query-build time.
const (
	NodeStatusRule                   = `kube_node_status_condition{condition="Ready"} > 0`
	PodInfoRule                      = `kube_pod_info{created_by_kind="$1",created_by_name=$2,namespace="$3"}`
	NamespaceLabelRule               = `kube_namespace_labels`
	WorkloadReplicaSetOwnerRule      = `kube_pod_owner{namespace="$1", owner_name!="<none>", owner_kind="ReplicaSet"}`
	WorkspaceNamespaceLabelRule      = `sum(kube_namespace_labels{label_kubesphere_io_workspace != ""}) by (label_kubesphere_io_workspace)`
	ExcludedVirtualNetworkInterfaces = `interface!~"^(cali.+|tunl.+|dummy.+|kube.+|flannel.+|cni.+|docker.+|veth.+|lo.*)"`
)

// Label key marking which workspace a namespace belongs to.
const (
	WorkspaceJoinedKey = "label_kubesphere_io_workspace"
)

// The metrics need to include extra info out of prometheus
// eg. add node name info to the etcd_server_list metric
const (
	EtcdServerList = "etcd_server_list"
)

// MetricMap maps a metric name to its PromQL (template) expression.
type MetricMap map[string]string
// ClusterMetricsNames lists every metric exposed at the cluster level; each
// entry keys into RulePromQLTmplMap. A duplicate
// "cluster_ingresses_extensions_count" entry was removed so the metric is no
// longer queried and returned twice.
var ClusterMetricsNames = []string{
	"cluster_cpu_utilisation",
	"cluster_cpu_usage",
	"cluster_cpu_total",
	"cluster_memory_utilisation",
	"cluster_memory_available",
	"cluster_memory_total",
	"cluster_memory_usage_wo_cache",
	"cluster_net_utilisation",
	"cluster_net_bytes_transmitted",
	"cluster_net_bytes_received",
	"cluster_disk_read_iops",
	"cluster_disk_write_iops",
	"cluster_disk_read_throughput",
	"cluster_disk_write_throughput",
	"cluster_disk_size_usage",
	"cluster_disk_size_utilisation",
	"cluster_disk_size_capacity",
	"cluster_disk_size_available",
	"cluster_disk_inode_total",
	"cluster_disk_inode_usage",
	"cluster_disk_inode_utilisation",
	"cluster_node_online",
	"cluster_node_offline",
	"cluster_node_total",
	"cluster_pod_count",
	"cluster_pod_quota",
	"cluster_pod_utilisation",
	"cluster_pod_running_count",
	"cluster_pod_succeeded_count",
	"cluster_pod_abnormal_count",
	"cluster_ingresses_extensions_count",
	"cluster_cronjob_count",
	"cluster_pvc_count",
	"cluster_daemonset_count",
	"cluster_deployment_count",
	"cluster_endpoint_count",
	"cluster_hpa_count",
	"cluster_job_count",
	"cluster_statefulset_count",
	"cluster_replicaset_count",
	"cluster_service_count",
	"cluster_secret_count",
	"cluster_namespace_count",
	"cluster_load1",
	"cluster_load5",
	"cluster_load15",

	// New in ks 2.0
	"cluster_pod_abnormal_ratio",
	"cluster_node_offline_ratio",
}
// NodeMetricsNames lists every metric exposed at the node level; each entry
// keys into RulePromQLTmplMap.
var NodeMetricsNames = []string{
	"node_cpu_utilisation",
	"node_cpu_total",
	"node_cpu_usage",
	"node_memory_utilisation",
	"node_memory_usage_wo_cache",
	"node_memory_available",
	"node_memory_total",
	"node_net_utilisation",
	"node_net_bytes_transmitted",
	"node_net_bytes_received",
	"node_disk_read_iops",
	"node_disk_write_iops",
	"node_disk_read_throughput",
	"node_disk_write_throughput",
	"node_disk_size_capacity",
	"node_disk_size_available",
	"node_disk_size_usage",
	"node_disk_size_utilisation",
	"node_disk_inode_total",
	"node_disk_inode_usage",
	"node_disk_inode_utilisation",
	"node_pod_count",
	"node_pod_quota",
	"node_pod_utilisation",
	"node_pod_running_count",
	"node_pod_succeeded_count",
	"node_pod_abnormal_count",
	"node_load1",
	"node_load5",
	"node_load15",
	// New in ks 2.0
	"node_pod_abnormal_ratio",
}
// WorkspaceMetricsNames lists every metric exposed at the workspace level;
// each entry keys into RulePromQLTmplMap.
var WorkspaceMetricsNames = []string{
	"workspace_cpu_usage",
	"workspace_memory_usage",
	"workspace_memory_usage_wo_cache",
	"workspace_net_bytes_transmitted",
	"workspace_net_bytes_received",
	"workspace_pod_count",
	"workspace_pod_running_count",
	"workspace_pod_succeeded_count",
	"workspace_pod_abnormal_count",
	"workspace_ingresses_extensions_count",
	"workspace_cronjob_count",
	"workspace_pvc_count",
	"workspace_daemonset_count",
	"workspace_deployment_count",
	"workspace_endpoint_count",
	"workspace_hpa_count",
	"workspace_job_count",
	"workspace_statefulset_count",
	"workspace_replicaset_count",
	"workspace_service_count",
	"workspace_secret_count",
	"workspace_all_project_count",
	// New in ks 2.0
	"workspace_pod_abnormal_ratio",
}
// NamespaceMetricsNames lists every metric exposed at the namespace level;
// each entry keys into RulePromQLTmplMap. The *_used/*_hard pairs mirror the
// "used"/"hard" types of kube_resourcequota.
var NamespaceMetricsNames = []string{
	"namespace_cpu_usage",
	"namespace_memory_usage",
	"namespace_memory_usage_wo_cache",
	"namespace_net_bytes_transmitted",
	"namespace_net_bytes_received",
	"namespace_pod_count",
	"namespace_pod_running_count",
	"namespace_pod_succeeded_count",
	"namespace_pod_abnormal_count",
	// Resource quota consumption (type="used").
	"namespace_configmap_count_used",
	"namespace_jobs_batch_count_used",
	"namespace_roles_count_used",
	"namespace_memory_limit_used",
	"namespace_pvc_used",
	"namespace_memory_request_used",
	"namespace_pvc_count_used",
	"namespace_cronjobs_batch_count_used",
	"namespace_ingresses_extensions_count_used",
	"namespace_cpu_limit_used",
	"namespace_storage_request_used",
	"namespace_deployment_count_used",
	"namespace_pod_count_used",
	"namespace_statefulset_count_used",
	"namespace_daemonset_count_used",
	"namespace_secret_count_used",
	"namespace_service_count_used",
	"namespace_cpu_request_used",
	"namespace_service_loadbalancer_used",
	// Resource quota limits (type="hard").
	"namespace_configmap_count_hard",
	"namespace_jobs_batch_count_hard",
	"namespace_roles_count_hard",
	"namespace_memory_limit_hard",
	"namespace_pvc_hard",
	"namespace_memory_request_hard",
	"namespace_pvc_count_hard",
	"namespace_cronjobs_batch_count_hard",
	"namespace_ingresses_extensions_count_hard",
	"namespace_cpu_limit_hard",
	"namespace_storage_request_hard",
	"namespace_deployment_count_hard",
	"namespace_pod_count_hard",
	"namespace_statefulset_count_hard",
	"namespace_daemonset_count_hard",
	"namespace_secret_count_hard",
	"namespace_service_count_hard",
	"namespace_cpu_request_hard",
	"namespace_service_loadbalancer_hard",
	// Workload and object counts.
	"namespace_cronjob_count",
	"namespace_pvc_count",
	"namespace_daemonset_count",
	"namespace_deployment_count",
	"namespace_endpoint_count",
	"namespace_hpa_count",
	"namespace_job_count",
	"namespace_statefulset_count",
	"namespace_replicaset_count",
	"namespace_service_count",
	"namespace_secret_count",
	"namespace_ingresses_extensions_count",
	// New in ks 2.0
	"namespace_pod_abnormal_ratio",
	"namespace_resourcequota_used_ratio",
}
// PodMetricsNames lists every metric exposed at the pod level; each entry
// keys into RulePromQLTmplMap.
var PodMetricsNames = []string{
	"pod_cpu_usage",
	"pod_memory_usage",
	"pod_memory_usage_wo_cache",
	"pod_net_bytes_transmitted",
	"pod_net_bytes_received",
}
// WorkloadMetricsNames lists every metric exposed at the workload level; each
// entry keys into RulePromQLTmplMap.
var WorkloadMetricsNames = []string{
	// Aggregated over the workload's pods.
	"workload_pod_cpu_usage",
	"workload_pod_memory_usage",
	"workload_pod_memory_usage_wo_cache",
	"workload_pod_net_bytes_transmitted",
	"workload_pod_net_bytes_received",
	// Replica status per workload kind.
	"workload_deployment_replica",
	"workload_deployment_replica_available",
	"workload_statefulset_replica",
	"workload_statefulset_replica_available",
	"workload_daemonset_replica",
	"workload_daemonset_replica_available",
	// New in ks 2.0
	"workload_deployment_unavailable_replicas_ratio",
	"workload_daemonset_unavailable_replicas_ratio",
	"workload_statefulset_unavailable_replicas_ratio",
}
// ContainerMetricsNames lists every metric exposed at the container level;
// each entry keys into RulePromQLTmplMap.
var ContainerMetricsNames = []string{
	"container_cpu_usage",
	"container_memory_usage",
	"container_memory_usage_wo_cache",
	// Network metrics are kept disabled here, presumably because cAdvisor
	// reports network traffic per pod rather than per container.
	//"container_net_bytes_transmitted",
	//"container_net_bytes_received",
}
// PVCMetricsNames lists every metric exposed at the persistent-volume-claim
// level; each entry keys into RulePromQLTmplMap.
var PVCMetricsNames = []string{
	"pvc_inodes_available",
	"pvc_inodes_used",
	"pvc_inodes_total",
	"pvc_inodes_utilisation",
	"pvc_bytes_available",
	"pvc_bytes_used",
	"pvc_bytes_total",
	"pvc_bytes_utilisation",
}
// ComponentMetricsNames lists every metric exposed for the cluster's control
// plane components (etcd, apiserver, scheduler, controller-manager, CoreDNS,
// Prometheus itself); each entry keys into RulePromQLTmplMap.
var ComponentMetricsNames = []string{
	// etcd
	"etcd_server_list",
	"etcd_server_total",
	"etcd_server_up_total",
	"etcd_server_has_leader",
	"etcd_server_leader_changes",
	"etcd_server_proposals_failed_rate",
	"etcd_server_proposals_applied_rate",
	"etcd_server_proposals_committed_rate",
	"etcd_server_proposals_pending_count",
	"etcd_mvcc_db_size",
	"etcd_network_client_grpc_received_bytes",
	"etcd_network_client_grpc_sent_bytes",
	"etcd_grpc_call_rate",
	"etcd_grpc_call_failed_rate",
	"etcd_grpc_server_msg_received_rate",
	"etcd_grpc_server_msg_sent_rate",
	"etcd_disk_wal_fsync_duration",
	"etcd_disk_wal_fsync_duration_quantile",
	"etcd_disk_backend_commit_duration",
	"etcd_disk_backend_commit_duration_quantile",
	// kube-apiserver
	"apiserver_up_sum",
	"apiserver_request_rate",
	"apiserver_request_by_verb_rate",
	"apiserver_request_latencies",
	"apiserver_request_by_verb_latencies",
	// kube-scheduler
	"scheduler_up_sum",
	"scheduler_schedule_attempts",
	"scheduler_schedule_attempt_rate",
	"scheduler_e2e_scheduling_latency",
	"scheduler_e2e_scheduling_latency_quantile",
	// kube-controller-manager
	"controller_manager_up_sum",
	// CoreDNS
	"coredns_up_sum",
	"coredns_cache_hits",
	"coredns_cache_misses",
	"coredns_dns_request_rate",
	"coredns_dns_request_duration",
	"coredns_dns_request_duration_quantile",
	"coredns_dns_request_by_type_rate",
	"coredns_dns_request_by_rcode_rate",
	"coredns_panic_rate",
	"coredns_proxy_request_rate",
	"coredns_proxy_request_duration",
	"coredns_proxy_request_duration_quantile",
	// Prometheus
	"prometheus_up_sum",
	"prometheus_tsdb_head_samples_appended_rate",
}
var RulePromQLTmplMap = MetricMap{
//cluster
"cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m",
"cluster_cpu_usage": `round(:node_cpu_utilisation:avg1m * sum(node:node_num_cpu:sum), 0.001)`,
"cluster_cpu_total": "sum(node:node_num_cpu:sum)",
"cluster_memory_utilisation": ":node_memory_utilisation:",
"cluster_memory_available": "sum(node:node_memory_bytes_available:sum)",
"cluster_memory_total": "sum(node:node_memory_bytes_total:sum)",
"cluster_memory_usage_wo_cache": "sum(node:node_memory_bytes_total:sum) - sum(node:node_memory_bytes_available:sum)",
"cluster_net_utilisation": ":node_net_utilisation:sum_irate",
"cluster_net_bytes_transmitted": "sum(node:node_net_bytes_transmitted:sum_irate)",
"cluster_net_bytes_received": "sum(node:node_net_bytes_received:sum_irate)",
"cluster_disk_read_iops": "sum(node:data_volume_iops_reads:sum)",
"cluster_disk_write_iops": "sum(node:data_volume_iops_writes:sum)",
"cluster_disk_read_throughput": "sum(node:data_volume_throughput_bytes_read:sum)",
"cluster_disk_write_throughput": "sum(node:data_volume_throughput_bytes_written:sum)",
"cluster_disk_size_usage": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} - node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_size_utilisation": `cluster:disk_utilization:ratio`,
"cluster_disk_size_capacity": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_size_available": `sum(max(node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) by (device, instance))`,
"cluster_disk_inode_total": `sum(node:node_inodes_total:)`,
"cluster_disk_inode_usage": `sum(node:node_inodes_total:) - sum(node:node_inodes_free:)`,
"cluster_disk_inode_utilisation": `cluster:disk_inode_utilization:ratio`,
"cluster_namespace_count": `count(kube_namespace_annotations)`,
// cluster_pod_count = cluster_pod_running_count + cluster_pod_succeeded_count + cluster_pod_abnormal_count
"cluster_pod_count": `cluster:pod:sum`,
"cluster_pod_quota": `sum(max(kube_node_status_capacity_pods) by (node) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0))`,
"cluster_pod_utilisation": `cluster:pod_utilization:ratio`,
"cluster_pod_running_count": `cluster:pod_running:count`,
"cluster_pod_succeeded_count": `count(kube_pod_info unless on (pod) (kube_pod_status_phase{phase=~"Failed|Pending|Unknown|Running"} > 0) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0))`,
"cluster_pod_abnormal_count": `cluster:pod_abnormal:sum`,
"cluster_node_online": `sum(kube_node_status_condition{condition="Ready",status="true"})`,
"cluster_node_offline": `cluster:node_offline:sum`,
"cluster_node_total": `sum(kube_node_status_condition{condition="Ready"})`,
"cluster_configmap_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/configmaps"}) by (resource, type)`,
"cluster_jobs_batch_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/jobs.batch"}) by (resource, type)`,
"cluster_roles_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/roles.rbac.authorization.k8s.io"}) by (resource, type)`,
"cluster_memory_limit_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="limits.memory"}) by (resource, type)`,
"cluster_pvc_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="persistentvolumeclaims"}) by (resource, type)`,
"cluster_memory_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="requests.memory"}) by (resource, type)`,
"cluster_pvc_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/persistentvolumeclaims"}) by (resource, type)`,
"cluster_cronjobs_batch_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/cronjobs.batch"}) by (resource, type)`,
"cluster_ingresses_extensions_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/ingresses.extensions"}) by (resource, type)`,
"cluster_cpu_limit_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="limits.cpu"}) by (resource, type)`,
"cluster_storage_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="requests.storage"}) by (resource, type)`,
"cluster_deployment_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/deployments.apps"}) by (resource, type)`,
"cluster_pod_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/pods"}) by (resource, type)`,
"cluster_statefulset_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/statefulsets.apps"}) by (resource, type)`,
"cluster_daemonset_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/daemonsets.apps"}) by (resource, type)`,
"cluster_secret_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/secrets"}) by (resource, type)`,
"cluster_service_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="count/services"}) by (resource, type)`,
"cluster_cpu_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="requests.cpu"}) by (resource, type)`,
"cluster_service_loadbalancer_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", resource="services.loadbalancers"}) by (resource, type)`,
"cluster_cronjob_count": `sum(kube_cronjob_labels)`,
"cluster_pvc_count": `sum(kube_persistentvolumeclaim_info)`,
"cluster_daemonset_count": `sum(kube_daemonset_labels)`,
"cluster_deployment_count": `sum(kube_deployment_labels)`,
"cluster_endpoint_count": `sum(kube_endpoint_labels)`,
"cluster_hpa_count": `sum(kube_hpa_labels)`,
"cluster_job_count": `sum(kube_job_labels)`,
"cluster_statefulset_count": `sum(kube_statefulset_labels)`,
"cluster_replicaset_count": `count(kube_replicaset_created)`,
"cluster_service_count": `sum(kube_service_info)`,
"cluster_secret_count": `sum(kube_secret_info)`,
"cluster_pv_count": `sum(kube_persistentvolume_labels)`,
"cluster_ingresses_extensions_count": `sum(kube_ingress_labels)`,
"cluster_load1": `sum(node_load1{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
"cluster_load5": `sum(node_load5{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
"cluster_load15": `sum(node_load15{job="node-exporter"}) / sum(node:node_num_cpu:sum)`,
// cluster: New added in ks 2.0
"cluster_pod_abnormal_ratio": `cluster:pod_abnormal:ratio`,
"cluster_node_offline_ratio": `cluster:node_offline:ratio`,
//node
"node_cpu_utilisation": "node:node_cpu_utilisation:avg1m",
"node_cpu_total": "node:node_num_cpu:sum",
"node_memory_utilisation": "node:node_memory_utilisation:",
"node_memory_available": "node:node_memory_bytes_available:sum",
"node_memory_total": "node:node_memory_bytes_total:sum",
"node_memory_usage_wo_cache": "node:node_memory_bytes_total:sum$1 - node:node_memory_bytes_available:sum$1",
"node_net_utilisation": "node:node_net_utilisation:sum_irate",
"node_net_bytes_transmitted": "node:node_net_bytes_transmitted:sum_irate",
"node_net_bytes_received": "node:node_net_bytes_received:sum_irate",
"node_disk_read_iops": "node:data_volume_iops_reads:sum",
"node_disk_write_iops": "node:data_volume_iops_writes:sum",
"node_disk_read_throughput": "node:data_volume_throughput_bytes_read:sum",
"node_disk_write_throughput": "node:data_volume_throughput_bytes_written:sum",
"node_disk_size_capacity": `sum(max(node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1) by (device, node)) by (node)`,
"node_disk_size_available": `node:disk_space_available:$1`,
"node_disk_size_usage": `sum(max((node_filesystem_size_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"} - node_filesystem_avail_bytes{device=~"/dev/.*", device!~"/dev/loop\\d+", job="node-exporter"}) * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:$1) by (device, node)) by (node)`,
"node_disk_size_utilisation": `node:disk_space_utilization:ratio$1`,
"node_disk_inode_total": `node:node_inodes_total:$1`,
"node_disk_inode_usage": `node:node_inodes_total:$1 - node:node_inodes_free:$1`,
"node_disk_inode_utilisation": `node:disk_inode_utilization:ratio$1`,
"node_pod_count": `node:pod_count:sum$1`,
"node_pod_quota": `max(kube_node_status_capacity_pods$1) by (node) unless on (node) (kube_node_status_condition{condition="Ready",status=~"unknown|false"} > 0)`,
"node_pod_utilisation": `node:pod_utilization:ratio$1`,
"node_pod_running_count": `node:pod_running:count$1`,
"node_pod_succeeded_count": `node:pod_succeeded:count$1`,
"node_pod_abnormal_count": `node:pod_abnormal:count$1`,
// without log node: unless on(node) kube_node_labels{label_role="log"}
"node_cpu_usage": `round(node:node_cpu_utilisation:avg1m$1 * node:node_num_cpu:sum$1, 0.001)`,
"node_load1": `node:load1:ratio$1`,
"node_load5": `node:load5:ratio$1`,
"node_load15": `node:load15:ratio$1`,
// New in ks 2.0
"node_pod_abnormal_ratio": `node:pod_abnormal:ratio$1`,
//namespace
"namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace=~"$1"}, 0.001)`,
"namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"$1"}`,
"namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", namespace=~"$1"}`,
"namespace_net_bytes_transmitted": `sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", namespace=~"$1", pod_name!="", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]))* on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_net_bytes_received": `sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", namespace=~"$1", pod_name!="", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m])) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pod_count": `sum(kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pod_running_count": `sum(kube_pod_status_phase{phase="Running", namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pod_succeeded_count": `sum(kube_pod_status_phase{phase="Succeeded", namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pod_abnormal_count": `namespace:pod_abnormal:count{namespace!="", namespace=~"$1"}`,
"namespace_roles_count_used": `max(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pvc_used": `max(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace=~"$1", resource="persistentvolumeclaims"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_storage_request_used": `max(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace=~"$1", resource="requests.storage"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_service_loadbalancer_used": `max(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace=~"$1", resource="services.loadbalancers"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
	// Workarounds to calculate resource quota usage: instead of reading the "used" series of
	// kube_resourcequota, these count the objects directly from kube-state-metrics
	// (kube_deployment_created, kube_job_info, etc.), so they work even when no ResourceQuota exists.
"namespace_deployment_count_used": `count(kube_deployment_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_statefulset_count_used": `count(kube_statefulset_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_daemonset_count_used": `count(kube_daemonset_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_jobs_batch_count_used": `count(kube_job_info{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_cronjobs_batch_count_used": `count(kube_cronjob_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_pod_count_used": `sum(kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_service_count_used": `count(kube_service_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_ingresses_extensions_count_used": `count(kube_ingress_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_pvc_count_used": `count(kube_persistentvolumeclaim_info{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_secret_count_used": `count(kube_secret_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_configmap_count_used": `count(kube_configmap_created{namespace="$1"}) by (namespace) * on(namespace) group_left(label_kubesphere_io_workspace) kube_namespace_labels`,
"namespace_cpu_limit_used": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="$1"}, 0.001)`,
"namespace_cpu_request_used": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="$1"}, 0.001)`,
"namespace_memory_limit_used": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", namespace=~"$1"}`,
"namespace_memory_request_used": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", namespace=~"$1"}`,
"namespace_configmap_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/configmaps"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_jobs_batch_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/jobs.batch"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_roles_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/roles.rbac.authorization.k8s.io"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_memory_limit_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="limits.memory"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pvc_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="persistentvolumeclaims"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_memory_request_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="requests.memory"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pvc_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/persistentvolumeclaims"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_cronjobs_batch_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/cronjobs.batch"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_ingresses_extensions_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/ingresses.extensions"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_cpu_limit_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="limits.cpu"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_storage_request_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="requests.storage"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_deployment_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/deployments.apps"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pod_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/pods"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_statefulset_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/statefulsets.apps"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_daemonset_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/daemonsets.apps"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_secret_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/secrets"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_service_count_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="count/services"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_cpu_request_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="requests.cpu"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_service_loadbalancer_hard": `min(kube_resourcequota{resourcequota!="quota", type="hard", namespace!="", namespace=~"$1", resource="services.loadbalancers"}) by (namespace, resource, type) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_cronjob_count": `sum(kube_cronjob_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_pvc_count": `sum(kube_persistentvolumeclaim_info{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_daemonset_count": `sum(kube_daemonset_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_deployment_count": `sum(kube_deployment_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_endpoint_count": `sum(kube_endpoint_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_hpa_count": `sum(kube_hpa_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_job_count": `sum(kube_job_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_statefulset_count": `sum(kube_statefulset_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_replicaset_count": `count(kube_replicaset_created{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_service_count": `sum(kube_service_info{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_secret_count": `sum(kube_secret_info{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
"namespace_ingresses_extensions_count": `sum(kube_ingress_labels{namespace!="", namespace=~"$1"}) by (namespace) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels)`,
// New in ks 2.0
"namespace_pod_abnormal_ratio": `namespace:pod_abnormal:ratio{namespace!="", namespace=~"$1"}`,
"namespace_resourcequota_used_ratio": `namespace:resourcequota_used:ratio{namespace!="", namespace=~"$1"}`,
// pod
"pod_cpu_usage": `round(sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name!="", pod_name="$2", image!=""}[5m])) by (namespace, pod_name), 0.001)`,
"pod_memory_usage": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name!="", pod_name="$2", image!=""}) by (namespace, pod_name)`,
"pod_memory_usage_wo_cache": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name!="", pod_name="$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name!="", pod_name="$2",image!=""}) by (namespace, pod_name)`,
"pod_net_bytes_transmitted": `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name="$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]))`,
"pod_net_bytes_received": `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name="$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]))`,
"pod_cpu_usage_all": `round(sum(irate(container_cpu_usage_seconds_total{job="kubelet", namespace="$1", pod_name!="", pod_name=~"$2", image!=""}[5m])) by (namespace, pod_name), 0.001)`,
"pod_memory_usage_all": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name!="", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
"pod_memory_usage_wo_cache_all": `sum(container_memory_usage_bytes{job="kubelet", namespace="$1", pod_name!="", pod_name=~"$2", image!=""} - container_memory_cache{job="kubelet", namespace="$1", pod_name!="", pod_name=~"$2", image!=""}) by (namespace, pod_name)`,
"pod_net_bytes_transmitted_all": `sum by (namespace, pod_name) (irate(container_network_transmit_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]))`,
"pod_net_bytes_received_all": `sum by (namespace, pod_name) (irate(container_network_receive_bytes_total{namespace="$1", pod_name!="", pod_name=~"$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]))`,
"pod_cpu_usage_node": `round(sum by (node, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet",pod_name!="", pod_name=~"$2", image!=""}[5m]) * on (namespace, pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$3"}, "pod_name", "", "pod", "_name")), 0.001)`,
"pod_memory_usage_node": `sum by (node, pod_name) (container_memory_usage_bytes{job="kubelet",pod_name!="", pod_name=~"$2", image!=""} * on (namespace, pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$3"}, "pod_name", "", "pod", "_name"))`,
"pod_memory_usage_wo_cache_node": `sum by (node, pod_name) ((container_memory_usage_bytes{job="kubelet",pod_name!="", pod_name=~"$2", image!=""} - container_memory_cache{job="kubelet",pod_name!="", pod_name=~"$2", image!=""}) * on (namespace, pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$3"}, "pod_name", "", "pod", "_name"))`,
"pod_net_bytes_transmitted_node": `sum by (node, pod_name) (irate(container_network_transmit_bytes_total{pod_name!="", pod_name=~"$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$3"}, "pod_name", "", "pod", "_name"))`,
"pod_net_bytes_received_node": `sum by (node, pod_name) (irate(container_network_receive_bytes_total{pod_name!="", pod_name=~"$2", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m]) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$3"}, "pod_name", "", "pod", "_name"))`,
// workload
	// Workload-level usage is derived by joining pod metrics (e.g. "container_cpu_usage_seconds_total")
	// with "kube_pod_owner" so that pods are attributed to their owning workload.
	//
	// Naming convention:
	//   - Hardware resource metrics: prefix the pod metric name with `workload_` (e.g. workload_pod_cpu_usage).
	//   - Kubernetes resource metrics: include the workload type in the metric name (e.g. workload_deployment_replica).
"workload_pod_cpu_usage": `round(namespace:workload_cpu_usage:sum{namespace="$2", workload=~"$3"}, 0.001)`,
"workload_pod_memory_usage": `namespace:workload_memory_usage:sum{namespace="$2", workload=~"$3"}`,
"workload_pod_memory_usage_wo_cache": `namespace:workload_memory_usage_wo_cache:sum{namespace="$2", workload=~"$3"}`,
"workload_pod_net_bytes_transmitted": `namespace:workload_net_bytes_transmitted:sum_irate{namespace="$2", workload=~"$3"}`,
"workload_pod_net_bytes_received": `namespace:workload_net_bytes_received:sum_irate{namespace="$2", workload=~"$3"}`,
"workload_deployment_replica": `label_join(sum (label_join(label_replace(kube_deployment_spec_replicas{namespace="$2", deployment=~"$3"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="$2", deployment=~"$3"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_statefulset_replica": `label_join(sum (label_join(label_replace(kube_statefulset_replicas{namespace="$2", statefulset=~"$3"}, "owner_kind", "StatefulSet", "", ""), "workload", "", "statefulset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_statefulset_replica_available": `label_join(sum (label_join(label_replace(kube_statefulset_status_replicas_current{namespace="$2", statefulset=~"$3"}, "owner_kind", "StatefulSet", "", ""), "workload", "", "statefulset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_daemonset_replica": `label_join(sum (label_join(label_replace(kube_daemonset_status_desired_number_scheduled{namespace="$2", daemonset=~"$3"}, "owner_kind", "DaemonSet", "", ""), "workload", "", "daemonset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
"workload_daemonset_replica_available": `label_join(sum (label_join(label_replace(kube_daemonset_status_number_available{namespace="$2", daemonset=~"$3"}, "owner_kind", "DaemonSet", "", ""), "workload", "", "daemonset")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`,
// New in ks 2.0
"workload_deployment_unavailable_replicas_ratio": `namespace:deployment_unavailable_replicas:ratio{namespace="$2", deployment=~"$3"}`,
"workload_daemonset_unavailable_replicas_ratio": `namespace:daemonset_unavailable_replicas:ratio{namespace="$2", daemonset=~"$3"}`,
"workload_statefulset_unavailable_replicas_ratio": `namespace:statefulset_unavailable_replicas:ratio{namespace="$2", statefulset=~"$3"}`,
// container
"container_cpu_usage": `round(sum(irate(container_cpu_usage_seconds_total{namespace="$1", pod_name="$2", container_name!="POD", container_name=~"$3"}[5m])) by (namespace, pod_name, container_name), 0.001)`,
"container_memory_usage": `sum(container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name!="POD", container_name=~"$3"}) by (namespace, pod_name, container_name)`,
"container_memory_usage_wo_cache": `container_memory_usage_bytes{namespace="$1", pod_name="$2", container_name!="POD", container_name=~"$3"} - ignoring(id, image, endpoint, instance, job, name, service) container_memory_cache{namespace="$1", pod_name="$2", container_name!="POD", container_name=~"$3"}`,
"container_net_bytes_transmitted": `sum(irate(container_network_transmit_bytes_total{job="kubelet", namespace="$1", pod_name="$2", container_name="POD", ` + ExcludedVirtualNetworkInterfaces + `}[5m])) by (namespace, pod_name, container_name)`,
"container_net_bytes_received": `sum(irate(container_network_receive_bytes_total{job="kubelet", namespace="$1", pod_name="$2", container_name="POD", ` + ExcludedVirtualNetworkInterfaces + `}[5m])) by (namespace, pod_name, container_name)`,
"container_cpu_usage_node": `round(sum by (node, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name="$2", container_name!="POD", container_name!="", container_name=~"$3", image!=""}[5m]) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$1"}, "pod_name", "", "pod", "_name")), 0.001)`,
"container_memory_usage_node": `sum by (node, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", pod_name="$2", container_name!="POD", container_name!="", container_name=~"$3", image!=""} * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$1"}, "pod_name", "", "pod", "_name"))`,
"container_memory_usage_wo_cache_node": `sum by (node, pod_name, container_name) ((container_memory_usage_bytes{job="kubelet", pod_name="$2", container_name!="POD", container_name!="", container_name=~"$3", image!=""} - container_memory_cache{job="kubelet", pod_name="$2", container_name!="POD", container_name!="", container_name=~"$3", image!=""}) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$1"}, "pod_name", "", "pod", "_name"))`,
"container_net_bytes_transmitted_node": `sum by (node, pod_name, container_name) (irate(container_network_transmit_bytes_total{job="kubelet", ` + ExcludedVirtualNetworkInterfaces + `, pod_name="$2", container_name="POD", container_name!="", image!=""}[5m]) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$1"}, "pod_name", "", "pod", "_name"))`,
"container_net_bytes_received_node": `sum by (node, pod_name, container_name) (irate(container_network_receive_bytes_total{job="kubelet", ` + ExcludedVirtualNetworkInterfaces + `, pod_name="$2", container_name="POD", container_name!="", image!=""}[5m]) * on (pod_name) group_left(node) label_join(node_namespace_pod:kube_pod_info:{node="$1"}, "pod_name", "", "pod", "_name"))`,
// workspace
"workspace_cpu_usage": `round(sum(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace$1, label_kubesphere_io_workspace$2}), 0.001)`,
"workspace_memory_usage": `sum(namespace:container_memory_usage_bytes:sum{namespace!="", namespace$1, label_kubesphere_io_workspace$2})`,
"workspace_memory_usage_wo_cache": `sum(namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", namespace$1, label_kubesphere_io_workspace$2})`,
"workspace_net_bytes_transmitted": `sum(sum by (namespace) (irate(container_network_transmit_bytes_total{namespace!="", namespace$1, pod_name!="", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m])))`,
"workspace_net_bytes_received": `sum(sum by (namespace) (irate(container_network_receive_bytes_total{namespace!="", namespace$1, pod_name!="", ` + ExcludedVirtualNetworkInterfaces + `, job="kubelet"}[5m])))`,
"workspace_pod_count": `sum(kube_pod_status_phase{phase!~"Failed|Succeeded", namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_pod_running_count": `sum(kube_pod_status_phase{phase="Running", namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_pod_succeeded_count": `sum(kube_pod_status_phase{phase="Succeeded", namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_pod_abnormal_count": `count((kube_pod_info{node!="", namespace$1} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0)) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_configmap_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/configmaps"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_jobs_batch_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/jobs.batch"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_roles_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/roles.rbac.authorization.k8s.io"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_memory_limit_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="limits.memory"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_pvc_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="persistentvolumeclaims"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_memory_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="requests.memory"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_pvc_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/persistentvolumeclaims"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_cronjobs_batch_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/cronjobs.batch"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_ingresses_extensions_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/ingresses.extensions"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_cpu_limit_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="limits.cpu"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_storage_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="requests.storage"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_deployment_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/deployments.apps"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_pod_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/pods"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_statefulset_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/statefulsets.apps"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_daemonset_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/daemonsets.apps"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_secret_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/secrets"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_service_count_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="count/services"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_cpu_request_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="requests.cpu"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_service_loadbalancer_used": `sum(kube_resourcequota{resourcequota!="quota", type="used", namespace!="", namespace$1, resource="services.loadbalancers"} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2})) by (resource, type)`,
"workspace_ingresses_extensions_count": `sum(kube_ingress_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_cronjob_count": `sum(kube_cronjob_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_pvc_count": `sum(kube_persistentvolumeclaim_info{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_daemonset_count": `sum(kube_daemonset_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_deployment_count": `sum(kube_deployment_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_endpoint_count": `sum(kube_endpoint_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_hpa_count": `sum(kube_hpa_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_job_count": `sum(kube_job_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_statefulset_count": `sum(kube_statefulset_labels{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_replicaset_count": `count(kube_replicaset_created{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_service_count": `sum(kube_service_info{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_secret_count": `sum(kube_secret_info{namespace!="", namespace$1} * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
"workspace_all_project_count": `count(kube_namespace_annotations)`,
// New in ks 2.0
"workspace_pod_abnormal_ratio": `count((kube_pod_info{node!="", namespace$1} unless on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Succeeded"}>0) unless on (pod, namespace) ((kube_pod_status_ready{job="kube-state-metrics", condition="true"}>0) and on (pod, namespace) (kube_pod_status_phase{job="kube-state-metrics", phase="Running"}>0)) unless on (pod, namespace) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", reason="ContainerCreating"}>0)) / sum(kube_pod_status_phase{phase!~"Succeeded", namespace!="", namespace$1}) * on (namespace) group_left(label_kubesphere_io_workspace)(kube_namespace_labels{label_kubesphere_io_workspace$2}))`,
// PVC
"pvc_inodes_available": `max (kubelet_volume_stats_inodes_free{namespace="$1",persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_used": `max (kubelet_volume_stats_inodes_used{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_total": `max (kubelet_volume_stats_inodes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_utilisation": `max (kubelet_volume_stats_inodes_used{namespace="$1", persistentvolumeclaim="$2"}/kubelet_volume_stats_inodes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_available": `max (kubelet_volume_stats_available_bytes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_used": `max (kubelet_volume_stats_used_bytes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_total": `max (kubelet_volume_stats_capacity_bytes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_utilisation": `max (kubelet_volume_stats_used_bytes{namespace="$1", persistentvolumeclaim="$2"}/kubelet_volume_stats_capacity_bytes{namespace="$1", persistentvolumeclaim="$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_available_ns": `max (kubelet_volume_stats_inodes_free{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_used_ns": `max (kubelet_volume_stats_inodes_used{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_total_ns": `max (kubelet_volume_stats_inodes{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_utilisation_ns": `max (kubelet_volume_stats_inodes_used{namespace="$1", persistentvolumeclaim=~"$2"}/kubelet_volume_stats_inodes{namespace="$1", persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_available_ns": `max (kubelet_volume_stats_available_bytes{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_used_ns": `max (kubelet_volume_stats_used_bytes{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_total_ns": `max (kubelet_volume_stats_capacity_bytes{namespace="$1",persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_bytes_utilisation_ns": `max (kubelet_volume_stats_used_bytes{namespace="$1", persistentvolumeclaim=~"$2"}/kubelet_volume_stats_capacity_bytes{namespace="$1", persistentvolumeclaim=~"$2"})by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info`,
"pvc_inodes_available_sc": `max (kubelet_volume_stats_inodes_free)by(namespace,persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_inodes_used_sc": `max (kubelet_volume_stats_inodes_used)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_inodes_total_sc": `max (kubelet_volume_stats_inodes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_inodes_utilisation_sc": `max (kubelet_volume_stats_inodes_used/kubelet_volume_stats_inodes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_bytes_available_sc": `max (kubelet_volume_stats_available_bytes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_bytes_used_sc": `max (kubelet_volume_stats_used_bytes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_bytes_total_sc": `max (kubelet_volume_stats_capacity_bytes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
"pvc_bytes_utilisation_sc": `max (kubelet_volume_stats_used_bytes/kubelet_volume_stats_capacity_bytes)by(namespace, persistentvolumeclaim)*on(namespace, persistentvolumeclaim)group_left(storageclass)kube_persistentvolumeclaim_info{storageclass="$1"}`,
// component
"etcd_server_list": `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`,
"etcd_server_total": `count(up{job="etcd"})`,
"etcd_server_up_total": `etcd:up:sum`,
"etcd_server_has_leader": `label_replace(etcd_server_has_leader, "node_ip", "$1", "instance", "(.*):.*")`,
"etcd_server_leader_changes": `label_replace(etcd:etcd_server_leader_changes_seen:sum_changes, "node_ip", "$1", "node", "(.*)")`,
"etcd_server_proposals_failed_rate": `avg(etcd:etcd_server_proposals_failed:sum_irate)`,
"etcd_server_proposals_applied_rate": `avg(etcd:etcd_server_proposals_applied:sum_irate)`,
"etcd_server_proposals_committed_rate": `avg(etcd:etcd_server_proposals_committed:sum_irate)`,
"etcd_server_proposals_pending_count": `avg(etcd:etcd_server_proposals_pending:sum)`,
"etcd_mvcc_db_size": `avg(etcd:etcd_debugging_mvcc_db_total_size:sum)`,
"etcd_network_client_grpc_received_bytes": `sum(etcd:etcd_network_client_grpc_received_bytes:sum_irate)`,
"etcd_network_client_grpc_sent_bytes": `sum(etcd:etcd_network_client_grpc_sent_bytes:sum_irate)`,
"etcd_grpc_call_rate": `sum(etcd:grpc_server_started:sum_irate)`,
"etcd_grpc_call_failed_rate": `sum(etcd:grpc_server_handled:sum_irate)`,
"etcd_grpc_server_msg_received_rate": `sum(etcd:grpc_server_msg_received:sum_irate)`,
"etcd_grpc_server_msg_sent_rate": `sum(etcd:grpc_server_msg_sent:sum_irate)`,
"etcd_disk_wal_fsync_duration": `avg(etcd:etcd_disk_wal_fsync_duration:avg)`,
"etcd_disk_wal_fsync_duration_quantile": `avg(etcd:etcd_disk_wal_fsync_duration:histogram_quantile) by (quantile)`,
"etcd_disk_backend_commit_duration": `avg(etcd:etcd_disk_backend_commit_duration:avg)`,
"etcd_disk_backend_commit_duration_quantile": `avg(etcd:etcd_disk_backend_commit_duration:histogram_quantile) by (quantile)`,
"apiserver_up_sum": `apiserver:up:sum`,
"apiserver_request_rate": `apiserver:apiserver_request_count:sum_irate`,
"apiserver_request_by_verb_rate": `apiserver:apiserver_request_count:sum_verb_irate`,
"apiserver_request_latencies": `apiserver:apiserver_request_latencies:avg`,
"apiserver_request_by_verb_latencies": `apiserver:apiserver_request_latencies:avg_by_verb`,
"scheduler_up_sum": `scheduler:up:sum`,
"scheduler_schedule_attempts": `scheduler:scheduler_schedule_attempts:sum`,
"scheduler_schedule_attempt_rate": `scheduler:scheduler_schedule_attempts:sum_rate`,
"scheduler_e2e_scheduling_latency": `scheduler:scheduler_e2e_scheduling_latency:avg`,
"scheduler_e2e_scheduling_latency_quantile": `scheduler:scheduler_e2e_scheduling_latency:histogram_quantile`,
"controller_manager_up_sum": `controller_manager:up:sum`,
"coredns_up_sum": `coredns:up:sum`,
"coredns_cache_hits": `coredns:coredns_cache_hits_total:sum_irate`,
"coredns_cache_misses": `coredns:coredns_cache_misses:sum_irate`,
"coredns_dns_request_rate": `coredns:coredns_dns_request_count:sum_irate`,
"coredns_dns_request_duration": `coredns:coredns_dns_request_duration:avg`,
"coredns_dns_request_duration_quantile": `coredns:coredns_dns_request_duration:histogram_quantile`,
"coredns_dns_request_by_type_rate": `coredns:coredns_dns_request_type_count:sum_irate`,
"coredns_dns_request_by_rcode_rate": `coredns:coredns_dns_response_rcode_count:sum_irate`,
"coredns_panic_rate": `coredns:coredns_panic_count:sum_irate`,
"coredns_proxy_request_rate": `coredns:coredns_proxy_request_count:sum_irate`,
"coredns_proxy_request_duration": `coredns:coredns_proxy_request_duration:avg`,
"coredns_proxy_request_duration_quantile": `coredns:coredns_proxy_request_duration:histogram_quantile`,
"prometheus_up_sum": `prometheus:up:sum`,
"prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`,
}

View File

@@ -32,31 +32,30 @@ func GetNamespacesWithMetrics(namespaces []*v1.Namespace) []*v1.Namespace {
nsFilter := "^(" + strings.Join(nsNameList, "|") + ")$"
var timeRelateParams = make(url.Values)
params := MonitoringRequestParams{
params := RequestParams{
ResourcesFilter: nsFilter,
Params: timeRelateParams,
QueryType: DefaultQueryType,
QueryParams: timeRelateParams,
QueryType: Query,
MetricsFilter: "namespace_cpu_usage|namespace_memory_usage_wo_cache|namespace_pod_count",
}
rawMetrics := GetNamespaceLevelMetrics(&params)
rawMetrics := GetNamespaceMetrics(params)
for _, result := range rawMetrics.Results {
for _, data := range result.Data.Result {
metricDescMap, ok := data[ResultItemMetric].(map[string]interface{})
if ok {
if ns, exist := metricDescMap[ResultItemMetricResourceName]; exist {
timeAndValue, ok := data[ResultItemValue].([]interface{})
if ok && len(timeAndValue) == 2 {
for i := 0; i < len(namespaces); i++ {
if namespaces[i].Name == ns {
if namespaces[i].Annotations == nil {
namespaces[i].Annotations = make(map[string]string, 0)
}
namespaces[i].Annotations[result.MetricName] = timeAndValue[1].(string)
}
}
ns, exist := data.Metric["namespace"]
if !exist || len(data.Value) != 2 {
continue
}
for _, item := range namespaces {
if item.Name == ns {
if item.Annotations == nil {
item.Annotations = make(map[string]string, 0)
}
item.Annotations[result.MetricName] = data.Value[1].(string)
}
}
}

View File

@@ -1,33 +1,59 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import "net/url"
const (
DefaultQueryStep = "10m"
DefaultQueryTimeout = "10s"
RangeQueryType = "query_range?"
DefaultQueryType = "query?"
import (
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"net/url"
)
type MonitoringRequestParams struct {
Params url.Values
type RequestParams struct {
QueryParams url.Values
QueryType string
SortMetricName string
SortMetric string
SortType string
PageNum string
LimitNum string
Tp string
Type string
MetricsFilter string
ResourcesFilter string
MetricsName string
NodeName string
WorkspaceName string
NamespaceName string
WorkloadKind string
WorkloadName string
NodeId string
WsName string
NsName string
PodName string
ContainerName string
PVCName string
StorageClassName string
ContainerName string
WorkloadKind string
ComponentName string
}
type APIResponse struct {
MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"`
v1alpha2.APIResponse
}
type Response struct {
MetricsLevel string `json:"metrics_level" description:"metric level, eg. cluster"`
Results []APIResponse `json:"results" description:"actual array of results"`
CurrentPage int `json:"page,omitempty" description:"current page returned"`
TotalPage int `json:"total_page,omitempty" description:"total number of pages"`
TotalItem int `json:"total_item,omitempty" description:"page size"`
}

View File

@@ -19,11 +19,13 @@
package metrics
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
"kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2"
"kubesphere.io/kubesphere/pkg/informers"
"math"
"sort"
"strconv"
"unicode"
"runtime/debug"
)
@@ -31,11 +33,18 @@ import (
const (
DefaultPageLimit = 5
DefaultPage = 1
ResultTypeVector = "vector"
ResultTypeMatrix = "matrix"
MetricStatusSuccess = "success"
ResultItemMetricResourceName = "resource_name"
ResultSortTypeDesc = "desc"
ResultSortTypeAsc = "asc"
)
type FormatedMetricDataWrapper struct {
fmtMetricData FormatedMetricData
by func(p, q *map[string]interface{}) bool
fmtMetricData v1alpha2.QueryResult
by func(p, q *v1alpha2.QueryValue) bool
}
func (wrapper FormatedMetricDataWrapper) Len() int {
@@ -51,7 +60,7 @@ func (wrapper FormatedMetricDataWrapper) Swap(i, j int) {
}
// sorted metric by ascending or descending order
func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetric) (*FormatedLevelMetric, int) {
func (rawMetrics *Response) SortBy(sortMetricName string, sortType string) (*Response, int) {
defer func() {
if err := recover(); err != nil {
klog.Errorln(err)
@@ -82,31 +91,31 @@ func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetri
if metricItem.MetricName == sortMetricName {
if sortType == ResultSortTypeAsc {
// asc
sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {
value1 := (*p)[ResultItemValue].([]interface{})
value2 := (*q)[ResultItemValue].([]interface{})
sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *v1alpha2.QueryValue) bool {
value1 := p.Value
value2 := q.Value
v1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)
v2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)
if v1 == v2 {
resourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
resourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
return resourceName1.(string) < resourceName2.(string)
resourceName1 := p.Metric[ResultItemMetricResourceName]
resourceName2 := q.Metric[ResultItemMetricResourceName]
return resourceName1 < resourceName2
}
return v1 < v2
}})
} else {
// desc
sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {
value1 := (*p)[ResultItemValue].([]interface{})
value2 := (*q)[ResultItemValue].([]interface{})
sort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *v1alpha2.QueryValue) bool {
value1 := p.Value
value2 := q.Value
v1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)
v2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)
if v1 == v2 {
resourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
resourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
return resourceName1.(string) > resourceName2.(string)
resourceName1 := p.Metric[ResultItemMetricResourceName]
resourceName2 := q.Metric[ResultItemMetricResourceName]
return resourceName1 > resourceName2
}
return v1 > v2
@@ -116,10 +125,10 @@ func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetri
for _, r := range metricItem.Data.Result {
// record the ordering of resource_name to indexMap
// example: {"metric":{ResultItemMetricResourceName: "Deployment:xxx"},"value":[1541142931.731,"3"]}
resourceName, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
resourceName, exist := r.Metric[ResultItemMetricResourceName]
if exist {
if _, exist := indexMap[resourceName.(string)]; !exist {
indexMap[resourceName.(string)] = i
if _, exist := indexMap[resourceName]; !exist {
indexMap[resourceName] = i
i = i + 1
}
}
@@ -128,9 +137,9 @@ func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetri
// iterator all metric to find max metricItems length
for _, r := range metricItem.Data.Result {
k, ok := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
k, ok := r.Metric[ResultItemMetricResourceName]
if ok {
currentResourceMap[k.(string)] = 1
currentResourceMap[k] = 1
}
}
@@ -154,12 +163,12 @@ func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetri
for i := 0; i < len(rawMetrics.Results); i++ {
re := rawMetrics.Results[i]
if re.Data.ResultType == ResultTypeVector && re.Status == MetricStatusSuccess {
sortedMetric := make([]map[string]interface{}, len(indexMap))
sortedMetric := make([]v1alpha2.QueryValue, len(indexMap))
for j := 0; j < len(re.Data.Result); j++ {
r := re.Data.Result[j]
k, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]
k, exist := r.Metric[ResultItemMetricResourceName]
if exist {
index, exist := indexMap[k.(string)]
index, exist := indexMap[k]
if exist {
sortedMetric[index] = r
}
@@ -173,7 +182,7 @@ func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetri
return rawMetrics, len(indexMap)
}
func Page(pageNum string, limitNum string, fmtLevelMetric *FormatedLevelMetric, maxLength int) interface{} {
func (fmtLevelMetric *Response) Page(pageNum string, limitNum string, maxLength int) *Response {
if maxLength <= 0 {
return fmtLevelMetric
}
@@ -245,72 +254,40 @@ func Page(pageNum string, limitNum string, fmtLevelMetric *FormatedLevelMetric,
return fmtLevelMetric
}
// maybe this function is time consuming
// The metric param is the result from Prometheus HTTP query
func ReformatJson(metric string, metricsName string, needAddParams map[string]string, needDelParams ...string) *FormatedMetric {
var formatMetric FormatedMetric
err := jsonIter.Unmarshal([]byte(metric), &formatMetric)
func getNodeAddressAndRole(nodeName string) (string, string) {
nodeLister := informers.SharedInformerFactory().Core().V1().Nodes().Lister()
node, err := nodeLister.Get(nodeName)
if err != nil {
klog.Errorln("Unmarshal metric json failed", err.Error(), metric)
return "", ""
}
if formatMetric.MetricName == "" {
if metricsName != "" {
formatMetric.MetricName = metricsName
}
}
// retrive metrics success
if formatMetric.Status == MetricStatusSuccess {
result := formatMetric.Data.Result
for _, res := range result {
metric, exist := res[ResultItemMetric]
// Prometheus query result format: .data.result[].metric
// metricMap is the value of .data.result[].metric
metricMap, sure := metric.(map[string]interface{})
if exist && sure {
delete(metricMap, "__name__")
}
if len(needDelParams) > 0 {
for _, p := range needDelParams {
delete(metricMap, p)
}
}
if needAddParams != nil && len(needAddParams) > 0 {
for n := range needAddParams {
if v, ok := metricMap[n]; ok {
delete(metricMap, n)
metricMap[ResultItemMetricResourceName] = v
} else {
metricMap[ResultItemMetricResourceName] = needAddParams[n]
}
}
}
var addr string
for _, address := range node.Status.Addresses {
if address.Type == "InternalIP" {
addr = address.Address
break
}
}
return &formatMetric
role := "node"
_, exists := node.Labels["node-role.kubernetes.io/master"]
if exists {
role = "master"
}
return addr, role
}
func ReformatNodeStatusField(nodeMetric *FormatedMetric) *FormatedMetric {
metricCount := len(nodeMetric.Data.Result)
for i := 0; i < metricCount; i++ {
metric, exist := nodeMetric.Data.Result[i][ResultItemMetric]
if exist {
status, exist := metric.(map[string]interface{})[MetricStatus]
if exist {
status = UpperFirstLetter(status.(string))
metric.(map[string]interface{})[MetricStatus] = status
func getNodeName(nodeIp string) string {
nodeLister := informers.SharedInformerFactory().Core().V1().Nodes().Lister()
nodes, _ := nodeLister.List(labels.Everything())
for _, node := range nodes {
for _, address := range node.Status.Addresses {
if address.Type == "InternalIP" && address.Address == nodeIp {
return node.Name
}
}
}
return nodeMetric
}
func UpperFirstLetter(str string) string {
for i, ch := range str {
return string(unicode.ToUpper(ch)) + str[i+1:]
}
return ""
}

View File

@@ -168,11 +168,11 @@ func DeleteWorkspaceRoleBinding(workspace, username string, role string) error {
return err
}
func GetDevOpsProjects(workspaceName string) ([]string, error) {
func GetDevOpsProjectsCount(workspaceName string) (int, error) {
dbconn, err := clientset.ClientSets().MySQL()
if err != nil {
return nil, err
return 0, err
}
query := dbconn.Select(devops.DevOpsProjectIdColumn).
@@ -183,9 +183,9 @@ func GetDevOpsProjects(workspaceName string) ([]string, error) {
devOpsProjects := make([]string, 0)
if _, err := query.Load(&devOpsProjects); err != nil {
return nil, err
return 0, err
}
return devOpsProjects, nil
return len(devOpsProjects), nil
}
func WorkspaceUserCount(workspace string) (int, error) {
@@ -196,24 +196,24 @@ func WorkspaceUserCount(workspace string) (int, error) {
return count, nil
}
func GetOrgRoles(name string) ([]string, error) {
return constants.WorkSpaceRoles, nil
func GetOrgRolesCount(name string) (int, error) {
return len(constants.WorkSpaceRoles), nil
}
func WorkspaceNamespaces(workspaceName string) ([]string, error) {
func WorkspaceNamespaceCount(workspaceName string) (int, error) {
ns, err := Namespaces(workspaceName)
namespaces := make([]string, 0)
if err != nil {
return namespaces, err
return 0, err
}
for i := 0; i < len(ns); i++ {
namespaces = append(namespaces, ns[i].Name)
}
return namespaces, nil
return len(namespaces), nil
}
func WorkspaceCount() (int, error) {