@@ -18,6 +18,9 @@ package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -26,14 +29,19 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/monitoring/expressions"
|
||||
"kubesphere.io/kubesphere/pkg/models/openpitrix"
|
||||
resourcev1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
opclient "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
|
||||
"sigs.k8s.io/application/api/v1beta1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
)
|
||||
|
||||
type MonitoringOperator interface {
|
||||
@@ -47,23 +55,31 @@ type MonitoringOperator interface {
|
||||
// TODO: expose KubeSphere self metrics in Prometheus format
|
||||
GetKubeSphereStats() Metrics
|
||||
GetWorkspaceStats(workspace string) Metrics
|
||||
|
||||
// meter
|
||||
GetNamedMetersOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) (Metrics, error)
|
||||
GetNamedMeters(metrics []string, time time.Time, opt monitoring.QueryOption) (Metrics, error)
|
||||
GetAppComponentsMap(ns string, apps []string) map[string][]string
|
||||
GetSerivePodsMap(ns string, services []string) map[string][]string
|
||||
}
|
||||
|
||||
type monitoringOperator struct {
|
||||
prometheus monitoring.Interface
|
||||
metricsserver monitoring.Interface
|
||||
k8s kubernetes.Interface
|
||||
ks ksinformers.SharedInformerFactory
|
||||
op openpitrix.Interface
|
||||
prometheus monitoring.Interface
|
||||
metricsserver monitoring.Interface
|
||||
k8s kubernetes.Interface
|
||||
ks ksinformers.SharedInformerFactory
|
||||
op openpitrix.Interface
|
||||
resourceGetter *resourcev1alpha3.ResourceGetter
|
||||
}
|
||||
|
||||
func NewMonitoringOperator(monitoringClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client) MonitoringOperator {
|
||||
func NewMonitoringOperator(monitoringClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client, resourceGetter *resourcev1alpha3.ResourceGetter) MonitoringOperator {
|
||||
return &monitoringOperator{
|
||||
prometheus: monitoringClient,
|
||||
metricsserver: metricsClient,
|
||||
k8s: k8s,
|
||||
ks: factory.KubeSphereSharedInformerFactory(),
|
||||
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient),
|
||||
prometheus: monitoringClient,
|
||||
metricsserver: metricsClient,
|
||||
k8s: k8s,
|
||||
ks: factory.KubeSphereSharedInformerFactory(),
|
||||
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient),
|
||||
resourceGetter: resourceGetter,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,3 +370,225 @@ func (mo monitoringOperator) GetWorkspaceStats(workspace string) Metrics {
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
/*
|
||||
meter related methods
|
||||
*/
|
||||
|
||||
func (mo monitoringOperator) getNamedMetersWithHourInterval(meters []string, t time.Time, opt monitoring.QueryOption) Metrics {
|
||||
|
||||
var opts []monitoring.QueryOption
|
||||
|
||||
opts = append(opts, opt)
|
||||
opts = append(opts, monitoring.MeterOption{
|
||||
Step: 1 * time.Hour,
|
||||
})
|
||||
|
||||
ress := mo.prometheus.GetNamedMeters(meters, t, opts)
|
||||
|
||||
return Metrics{Results: ress}
|
||||
}
|
||||
|
||||
func generateScalingFactorMap(step time.Duration) map[string]float64 {
|
||||
scalingMap := make(map[string]float64)
|
||||
|
||||
for k := range MeterResourceMap {
|
||||
scalingMap[k] = step.Hours()
|
||||
}
|
||||
return scalingMap
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) (metrics Metrics, err error) {
|
||||
|
||||
if step.Hours() < 1 {
|
||||
klog.Warning("step should be longer than one hour")
|
||||
step = 1 * time.Hour
|
||||
}
|
||||
if end.Sub(start).Hours() > 30*24 {
|
||||
if step.Hours() < 24 {
|
||||
err = errors.New("step should be larger than 24 hours")
|
||||
return
|
||||
}
|
||||
}
|
||||
if math.Mod(step.Hours(), 1.0) > 0 {
|
||||
err = errors.New("step should be integer hours")
|
||||
return
|
||||
}
|
||||
|
||||
// query time range: (start, end], so here we need to exclude start itself.
|
||||
if start.Add(step).After(end) {
|
||||
start = end
|
||||
} else {
|
||||
start = start.Add(step)
|
||||
}
|
||||
|
||||
var opts []monitoring.QueryOption
|
||||
|
||||
opts = append(opts, opt)
|
||||
opts = append(opts, monitoring.MeterOption{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
})
|
||||
|
||||
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, step, opts)
|
||||
sMap := generateScalingFactorMap(step)
|
||||
|
||||
for i, _ := range ress {
|
||||
ress[i].MetricData = updateMetricStatData(ress[i], sMap)
|
||||
}
|
||||
|
||||
return Metrics{Results: ress}, nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt monitoring.QueryOption) (Metrics, error) {
|
||||
|
||||
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
|
||||
|
||||
for metricIndex, _ := range metersPerHour.Results {
|
||||
|
||||
res := metersPerHour.Results[metricIndex]
|
||||
|
||||
metersPerHour.Results[metricIndex].MetricData = updateMetricStatData(res, nil)
|
||||
}
|
||||
|
||||
return metersPerHour, nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetAppComponentsMap(ns string, apps []string) map[string][]string {
|
||||
|
||||
componentsMap := make(map[string][]string)
|
||||
applicationList := []*appv1beta1.Application{}
|
||||
|
||||
result, err := mo.resourceGetter.List("applications", ns, query.New())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, obj := range result.Items {
|
||||
app, ok := obj.(*appv1beta1.Application)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
applicationList = append(applicationList, app)
|
||||
}
|
||||
|
||||
getAppFullName := func(appObject *v1beta1.Application) (name string) {
|
||||
name = appObject.Labels[constants.ApplicationName]
|
||||
if appObject.Labels[constants.ApplicationVersion] != "" {
|
||||
name += fmt.Sprintf(":%v", appObject.Labels[constants.ApplicationVersion])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
appFilter := func(appObject *v1beta1.Application) bool {
|
||||
|
||||
for _, app := range apps {
|
||||
var applicationName, applicationVersion string
|
||||
tmp := strings.Split(app, ":")
|
||||
|
||||
if len(tmp) >= 1 {
|
||||
applicationName = tmp[0]
|
||||
}
|
||||
if len(tmp) == 2 {
|
||||
applicationVersion = tmp[1]
|
||||
}
|
||||
|
||||
if applicationName != "" && appObject.Labels[constants.ApplicationName] != applicationName {
|
||||
return false
|
||||
}
|
||||
if applicationVersion != "" && appObject.Labels[constants.ApplicationVersion] != applicationVersion {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
for _, appObj := range applicationList {
|
||||
if appFilter(appObj) {
|
||||
for _, com := range appObj.Status.ComponentList.Objects {
|
||||
kind := strings.Title(com.Kind)
|
||||
name := com.Name
|
||||
componentsMap[getAppFullName((appObj))] = append(componentsMap[getAppFullName(appObj)], kind+":"+name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return componentsMap
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) getApplicationPVCs(appObject *v1beta1.Application) []string {
|
||||
|
||||
var pvcList []string
|
||||
|
||||
ns := appObject.Namespace
|
||||
for _, com := range appObject.Status.ComponentList.Objects {
|
||||
|
||||
switch strings.Title(com.Kind) {
|
||||
case "Deployment":
|
||||
deployObj, err := mo.k8s.AppsV1().Deployments(ns).Get(context.Background(), com.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, vol := range deployObj.Spec.Template.Spec.Volumes {
|
||||
pvcList = append(pvcList, vol.PersistentVolumeClaim.ClaimName)
|
||||
}
|
||||
case "Statefulset":
|
||||
stsObj, err := mo.k8s.AppsV1().StatefulSets(ns).Get(context.Background(), com.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return nil
|
||||
}
|
||||
for _, vol := range stsObj.Spec.Template.Spec.Volumes {
|
||||
pvcList = append(pvcList, vol.PersistentVolumeClaim.ClaimName)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return pvcList
|
||||
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetSerivePodsMap(ns string, services []string) map[string][]string {
|
||||
var svcPodsMap = make(map[string][]string)
|
||||
|
||||
for _, svc := range services {
|
||||
svcObj, err := mo.k8s.CoreV1().Services(ns).Get(context.Background(), svc, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
svcSelector := svcObj.Spec.Selector
|
||||
if len(svcSelector) == 0 {
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
svcLabels := labels.Set{}
|
||||
for key, value := range svcSelector {
|
||||
svcLabels[key] = value
|
||||
}
|
||||
|
||||
selector := labels.SelectorFromSet(svcLabels)
|
||||
opt := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
|
||||
podList, err := mo.k8s.CoreV1().Pods(ns).List(context.Background(), opt)
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
svcPodsMap[svc] = append(svcPodsMap[svc], pod.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ const (
|
||||
WorkspaceDevopsCount = "workspace_devops_project_count"
|
||||
WorkspaceMemberCount = "workspace_member_count"
|
||||
WorkspaceRoleCount = "workspace_role_count"
|
||||
|
||||
MetricMeterPrefix = "meter_"
|
||||
)
|
||||
|
||||
var ClusterMetrics = []string{
|
||||
@@ -78,6 +80,13 @@ var ClusterMetrics = []string{
|
||||
"cluster_load15",
|
||||
"cluster_pod_abnormal_ratio",
|
||||
"cluster_node_offline_ratio",
|
||||
|
||||
// meter
|
||||
"meter_cluster_cpu_usage",
|
||||
"meter_cluster_memory_usage",
|
||||
"meter_cluster_net_bytes_transmitted",
|
||||
"meter_cluster_net_bytes_received",
|
||||
"meter_cluster_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var NodeMetrics = []string{
|
||||
@@ -113,6 +122,13 @@ var NodeMetrics = []string{
|
||||
"node_load15",
|
||||
"node_pod_abnormal_ratio",
|
||||
"node_pleg_quantile",
|
||||
|
||||
// meter
|
||||
"meter_node_cpu_usage",
|
||||
"meter_node_memory_usage_wo_cache",
|
||||
"meter_node_net_bytes_transmitted",
|
||||
"meter_node_net_bytes_received",
|
||||
"meter_node_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var WorkspaceMetrics = []string{
|
||||
@@ -138,6 +154,13 @@ var WorkspaceMetrics = []string{
|
||||
"workspace_service_count",
|
||||
"workspace_secret_count",
|
||||
"workspace_pod_abnormal_ratio",
|
||||
|
||||
// meter
|
||||
"meter_workspace_cpu_usage",
|
||||
"meter_workspace_memory_usage",
|
||||
"meter_workspace_net_bytes_transmitted",
|
||||
"meter_workspace_net_bytes_received",
|
||||
"meter_workspace_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var NamespaceMetrics = []string{
|
||||
@@ -168,6 +191,23 @@ var NamespaceMetrics = []string{
|
||||
"namespace_configmap_count",
|
||||
"namespace_ingresses_extensions_count",
|
||||
"namespace_s2ibuilder_count",
|
||||
|
||||
// meter
|
||||
"meter_namespace_cpu_usage",
|
||||
"meter_namespace_memory_usage_wo_cache",
|
||||
"meter_namespace_net_bytes_transmitted",
|
||||
"meter_namespace_net_bytes_received",
|
||||
"meter_namespace_pvc_bytes_total",
|
||||
}
|
||||
|
||||
// ApplicationMetrics lists the metering metrics exposed at application scope.
var ApplicationMetrics = []string{

	// meter
	"meter_application_cpu_usage",
	"meter_application_memory_usage_wo_cache",
	"meter_application_net_bytes_transmitted",
	"meter_application_net_bytes_received",
	"meter_application_pvc_bytes_total",
}
|
||||
|
||||
var WorkloadMetrics = []string{
|
||||
@@ -185,6 +225,21 @@ var WorkloadMetrics = []string{
|
||||
"workload_deployment_unavailable_replicas_ratio",
|
||||
"workload_daemonset_unavailable_replicas_ratio",
|
||||
"workload_statefulset_unavailable_replicas_ratio",
|
||||
|
||||
// meter
|
||||
"meter_workload_cpu_usage",
|
||||
"meter_workload_memory_usage_wo_cache",
|
||||
"meter_workload_net_bytes_transmitted",
|
||||
"meter_workload_net_bytes_received",
|
||||
"meter_workload_pvc_bytes_total",
|
||||
}
|
||||
|
||||
// ServiceMetrics lists the metering metrics exposed at service scope.
// Note: unlike the other scopes, services have no PVC meter.
var ServiceMetrics = []string{
	// meter
	"meter_service_cpu_usage",
	"meter_service_memory_usage_wo_cache",
	"meter_service_net_bytes_transmitted",
	"meter_service_net_bytes_received",
}
|
||||
|
||||
var PodMetrics = []string{
|
||||
@@ -193,6 +248,13 @@ var PodMetrics = []string{
|
||||
"pod_memory_usage_wo_cache",
|
||||
"pod_net_bytes_transmitted",
|
||||
"pod_net_bytes_received",
|
||||
|
||||
// meter
|
||||
"meter_pod_cpu_usage",
|
||||
"meter_pod_memory_usage_wo_cache",
|
||||
"meter_pod_net_bytes_transmitted",
|
||||
"meter_pod_net_bytes_received",
|
||||
"meter_pod_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var ContainerMetrics = []string{
|
||||
|
||||
@@ -23,13 +23,15 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
IdentifierNode = "node"
|
||||
IdentifierWorkspace = "workspace"
|
||||
IdentifierNamespace = "namespace"
|
||||
IdentifierWorkload = "workload"
|
||||
IdentifierPod = "pod"
|
||||
IdentifierContainer = "container"
|
||||
IdentifierPVC = "persistentvolumeclaim"
|
||||
IdentifierNode = "node"
|
||||
IdentifierWorkspace = "workspace"
|
||||
IdentifierNamespace = "namespace"
|
||||
IdentifierWorkload = "workload"
|
||||
IdentifierPod = "pod"
|
||||
IdentifierContainer = "container"
|
||||
IdentifierPVC = "persistentvolumeclaim"
|
||||
IdentifierService = "service"
|
||||
IdentifierApplication = "application"
|
||||
|
||||
OrderAscending = "asc"
|
||||
OrderDescending = "desc"
|
||||
|
||||
@@ -16,7 +16,9 @@ limitations under the License.
|
||||
|
||||
package monitoring
|
||||
|
||||
import "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
type Metrics struct {
|
||||
Results []monitoring.Metric `json:"results" description:"actual array of results"`
|
||||
|
||||
222
pkg/models/monitoring/utils.go
Normal file
222
pkg/models/monitoring/utils.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
// Meter resource categories. Every meter metric name maps to one of these in
// MeterResourceMap so that getFeeWithMeterName can pick the right unit price.
const (
	METER_RESOURCE_TYPE_CPU = iota
	METER_RESOURCE_TYPE_MEM
	METER_RESOURCE_TYPE_NET_INGRESS
	METER_RESOURCE_TYPE_NET_EGRESS
	METER_RESOURCE_TYPE_PVC

	// meteringConfig is the on-disk location of the billing price
	// configuration read by LoadYaml.
	meteringConfig = "/etc/kubesphere/metering/ks-metering.yaml"
)
|
||||
|
||||
// MeterResourceMap maps every supported meter metric name to its resource
// category (CPU, memory, network ingress/egress, PVC) for fee calculation.
var MeterResourceMap = map[string]int{
	"meter_cluster_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_cluster_memory_usage":               METER_RESOURCE_TYPE_MEM,
	"meter_cluster_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_cluster_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_cluster_pvc_bytes_total":            METER_RESOURCE_TYPE_PVC,
	"meter_node_cpu_usage":                     METER_RESOURCE_TYPE_CPU,
	"meter_node_memory_usage_wo_cache":         METER_RESOURCE_TYPE_MEM,
	"meter_node_net_bytes_transmitted":         METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_node_net_bytes_received":            METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_node_pvc_bytes_total":               METER_RESOURCE_TYPE_PVC,
	"meter_workspace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_workspace_memory_usage":             METER_RESOURCE_TYPE_MEM,
	"meter_workspace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workspace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workspace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_namespace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_namespace_memory_usage_wo_cache":    METER_RESOURCE_TYPE_MEM,
	"meter_namespace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_namespace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_namespace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_application_cpu_usage":              METER_RESOURCE_TYPE_CPU,
	"meter_application_memory_usage_wo_cache":  METER_RESOURCE_TYPE_MEM,
	"meter_application_net_bytes_transmitted":  METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_application_net_bytes_received":     METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_application_pvc_bytes_total":        METER_RESOURCE_TYPE_PVC,
	"meter_workload_cpu_usage":                 METER_RESOURCE_TYPE_CPU,
	"meter_workload_memory_usage_wo_cache":     METER_RESOURCE_TYPE_MEM,
	"meter_workload_net_bytes_transmitted":     METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workload_net_bytes_received":        METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workload_pvc_bytes_total":           METER_RESOURCE_TYPE_PVC,
	"meter_service_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_service_memory_usage_wo_cache":      METER_RESOURCE_TYPE_MEM,
	"meter_service_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_service_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_cpu_usage":                      METER_RESOURCE_TYPE_CPU,
	"meter_pod_memory_usage_wo_cache":          METER_RESOURCE_TYPE_MEM,
	"meter_pod_net_bytes_transmitted":          METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_pod_net_bytes_received":             METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_pvc_bytes_total":                METER_RESOURCE_TYPE_PVC,
}
|
||||
|
||||
// PriceInfo holds the hourly unit prices used to turn aggregated meter
// readings into fees.
// NOTE(review): "Giagabytes" in the ingress field and its tags is a typo, but
// the JSON/YAML keys are part of the on-disk config format — renaming them
// would break existing ks-metering.yaml files; confirm before fixing.
type PriceInfo struct {
	CpuPerCorePerHour                         float64 `json:"cpuPerCorePerHour" yaml:"cpuPerCorePerHour"`
	MemPerGigabytesPerHour                    float64 `json:"memPerGigabytesPerHour" yaml:"memPerGigabytesPerHour"`
	IngressNetworkTrafficPerGiagabytesPerHour float64 `json:"ingressNetworkTrafficPerGiagabytesPerHour" yaml:"ingressNetworkTrafficPerGiagabytesPerHour"`
	EgressNetworkTrafficPerGigabytesPerHour   float64 `json:"egressNetworkTrafficPerGigabytesPerHour" yaml:"egressNetworkTrafficPerGigabytesPerHour"`
	PvcPerGigabytesPerHour                    float64 `json:"pvcPerGigabytesPerHour" yaml:"pvcPerGigabytesPerHour"`
}

// Billing wraps the price information section of the metering config.
type Billing struct {
	PriceInfo PriceInfo `json:"priceInfo" yaml:"priceInfo"`
}

// MeterConfig is the top-level structure of the ks-metering.yaml config file.
type MeterConfig struct {
	Billing Billing `json:"billing" yaml:"billing"`
}
|
||||
|
||||
// GetPriceInfo returns the configured unit prices from the billing section.
func (mc MeterConfig) GetPriceInfo() PriceInfo {
	return mc.Billing.PriceInfo
}
|
||||
|
||||
func LoadYaml() (*MeterConfig, error) {
|
||||
|
||||
var meterConfig MeterConfig
|
||||
|
||||
mf, err := os.Open(meteringConfig)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = yaml.NewYAMLOrJSONDecoder(mf, 1024).Decode(&meterConfig); err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &meterConfig, nil
|
||||
}
|
||||
|
||||
func getMaxPointValue(points []monitoring.Point) float64 {
|
||||
var max float64
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
max = p.Value()
|
||||
}
|
||||
|
||||
if p.Value() > max {
|
||||
max = p.Value()
|
||||
}
|
||||
}
|
||||
|
||||
return max
|
||||
}
|
||||
|
||||
func getMinPointValue(points []monitoring.Point) float64 {
|
||||
var min float64
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
min = p.Value()
|
||||
}
|
||||
|
||||
if p.Value() < min {
|
||||
min = p.Value()
|
||||
}
|
||||
}
|
||||
|
||||
return min
|
||||
}
|
||||
|
||||
func getSumPointValue(points []monitoring.Point) float64 {
|
||||
avg := 0.0
|
||||
|
||||
for _, p := range points {
|
||||
avg += p.Value()
|
||||
}
|
||||
|
||||
return avg
|
||||
}
|
||||
|
||||
// getAvgPointValue returns the arithmetic mean of the point values.
// NOTE(review): an empty slice yields 0/0 = NaN; the visible caller
// (updateMetricStatData) reaches this branch whenever len(points) != 1, which
// includes an empty series — confirm whether NaN is acceptable downstream.
func getAvgPointValue(points []monitoring.Point) float64 {
	return getSumPointValue(points) / float64(len(points))
}
|
||||
|
||||
func getFeeWithMeterName(meterName string, sum float64) float64 {
|
||||
|
||||
meterConfig, err := LoadYaml()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return -1
|
||||
}
|
||||
priceInfo := meterConfig.GetPriceInfo()
|
||||
|
||||
if resourceType, ok := MeterResourceMap[meterName]; !ok {
|
||||
klog.Errorf("invlaid meter %v", meterName)
|
||||
return -1
|
||||
} else {
|
||||
switch resourceType {
|
||||
case METER_RESOURCE_TYPE_CPU:
|
||||
// unit: core, precision: 0.001
|
||||
sum = math.Round(sum*1000) / 1000
|
||||
return priceInfo.CpuPerCorePerHour * sum
|
||||
case METER_RESOURCE_TYPE_MEM:
|
||||
// unit: Gigabyte, precision: 0.1
|
||||
sum = math.Round(sum/1073741824*10) / 10
|
||||
return priceInfo.MemPerGigabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_NET_INGRESS:
|
||||
// unit: Megabyte, precision: 1
|
||||
sum = math.Round(sum / 1048576)
|
||||
return priceInfo.IngressNetworkTrafficPerGiagabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_NET_EGRESS:
|
||||
// unit: Megabyte, precision:
|
||||
sum = math.Round(sum / 1048576)
|
||||
return priceInfo.EgressNetworkTrafficPerGigabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_PVC:
|
||||
// unit: Gigabyte, precision: 0.1
|
||||
sum = math.Round(sum/1073741824*10) / 10
|
||||
return priceInfo.PvcPerGigabytesPerHour * sum
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// updateMetricStatData fills the min/max/avg/sum/fee statistics on every
// metric value of metric and returns the (mutated) MetricData.
//
// scalingMap maps meter name -> scale factor for the summed value (e.g. the
// step length in hours); a nil map means factor 1.
// NOTE(review): if scalingMap is non-nil but lacks metricName, the factor is
// the map's zero value 0, silently zeroing the sum and fee — confirm this is
// intended rather than defaulting to 1.
func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float64) monitoring.MetricData {
	metricName := metric.MetricName
	metricData := metric.MetricData
	for index, metricValue := range metricData.MetricValues {

		// A matrix result carries a time series; any other result carries a
		// single sample, wrapped in a one-element slice for uniform handling.
		var points []monitoring.Point
		if metricData.MetricType == monitoring.MetricTypeMatrix {
			points = metricValue.Series
		} else {
			points = append(points, *metricValue.Sample)
		}

		var factor float64 = 1
		if scalingMap != nil {
			factor = scalingMap[metricName]
		}

		if len(points) == 1 {
			// Single point: min/max/avg are the raw sample value; only the
			// sum (and therefore the fee) is scaled.
			sample := points[0]
			sum := sample[1] * factor
			metricData.MetricValues[index].MinValue = sample[1]
			metricData.MetricValues[index].MaxValue = sample[1]
			metricData.MetricValues[index].AvgValue = sample[1]
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
		} else {
			// Series: aggregate over all points; again only the sum is scaled.
			sum := getSumPointValue(points) * factor
			metricData.MetricValues[index].MinValue = getMinPointValue(points)
			metricData.MetricValues[index].MaxValue = getMaxPointValue(points)
			metricData.MetricValues[index].AvgValue = getAvgPointValue(points)
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
		}

	}
	return metricData
}
|
||||
Reference in New Issue
Block a user