Integrate OpenPitrix metrics into metering.

Signed-off-by: Rao Yunkun <yunkunrao@yunify.com>
This commit is contained in:
Rao Yunkun
2021-03-29 15:53:22 +08:00
parent ea93f3832d
commit 845f6bbe89
15 changed files with 551 additions and 151 deletions

View File

@@ -1,7 +1,8 @@
package monitoring
import (
"math"
"fmt"
"math/big"
"os"
"k8s.io/apimachinery/pkg/util/yaml"
@@ -18,6 +19,13 @@ const (
METER_RESOURCE_TYPE_PVC
meteringConfig = "/etc/kubesphere/metering/ks-metering.yaml"
meteringDefaultPrecision = 10
meteringCorePrecision = 3
meteringMemPrecision = 1
meteringIngressPrecision = 0
meteringEgressPrecision = 0
meteringPvcPrecision = 1
)
var meterResourceUnitMap = map[int]string{
@@ -110,48 +118,59 @@ func LoadYaml() (*MeterConfig, error) {
return &meterConfig, nil
}
func getMaxPointValue(points []monitoring.Point) float64 {
var max float64
func getMaxPointValue(points []monitoring.Point) string {
var max *big.Float
for i, p := range points {
if i == 0 {
max = p.Value()
max = new(big.Float).SetFloat64(p.Value())
}
if p.Value() > max {
max = p.Value()
pf := new(big.Float).SetFloat64(p.Value())
if pf.Cmp(max) == 1 {
max = pf
}
}
return max
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), max)
}
func getMinPointValue(points []monitoring.Point) float64 {
var min float64
func getMinPointValue(points []monitoring.Point) string {
var min *big.Float
for i, p := range points {
if i == 0 {
min = p.Value()
min = new(big.Float).SetFloat64(p.Value())
}
if p.Value() < min {
min = p.Value()
pf := new(big.Float).SetFloat64(p.Value())
if min.Cmp(pf) == 1 {
min = pf
}
}
return min
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), min)
}
func getSumPointValue(points []monitoring.Point) float64 {
avg := 0.0
func getSumPointValue(points []monitoring.Point) string {
sum := new(big.Float).SetFloat64(0)
for _, p := range points {
avg += p.Value()
pf := new(big.Float).SetFloat64(p.Value())
sum.Add(sum, pf)
}
return avg
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), sum)
}
func getAvgPointValue(points []monitoring.Point) float64 {
return getSumPointValue(points) / float64(len(points))
// getAvgPointValue returns the arithmetic mean of the sample values in
// points, formatted with three decimal places. An empty series returns a
// zero value instead of dividing 0/0, which would make big.Float.Quo
// panic with big.ErrNaN. Returns "" when the sum cannot be re-parsed.
func getAvgPointValue(points []monitoring.Point) string {
	if len(points) == 0 {
		return fmt.Sprintf("%.3f", 0.0)
	}
	sum, ok := new(big.Float).SetString(getSumPointValue(points))
	if !ok {
		klog.Error("failed to parse big.Float")
		return ""
	}
	length := new(big.Float).SetFloat64(float64(len(points)))
	return fmt.Sprintf("%.3f", sum.Quo(sum, length))
}
func getCurrencyUnit() string {
@@ -164,6 +183,10 @@ func getCurrencyUnit() string {
return meterConfig.GetPriceInfo().CurrencyUnit
}
// generateFloatFormat builds a fmt verb such as "%.10f" that renders a
// float with the given number of decimal places.
func generateFloatFormat(precision int) string {
	return fmt.Sprintf("%%.%df", precision)
}
func getResourceUnit(meterName string) string {
if resourceType, ok := MeterResourceMap[meterName]; !ok {
klog.Errorf("invlaid meter %v", meterName)
@@ -173,43 +196,66 @@ func getResourceUnit(meterName string) string {
}
}
func getFeeWithMeterName(meterName string, sum float64) float64 {
func getFeeWithMeterName(meterName string, sum string) string {
s, ok := new(big.Float).SetString(sum)
if !ok {
klog.Error("failed to parse string to float")
return ""
}
meterConfig, err := LoadYaml()
if err != nil {
klog.Error(err)
return -1
return ""
}
priceInfo := meterConfig.GetPriceInfo()
if resourceType, ok := MeterResourceMap[meterName]; !ok {
klog.Errorf("invlaid meter %v", meterName)
return -1
return ""
} else {
switch resourceType {
case METER_RESOURCE_TYPE_CPU:
// unit: core, precision: 0.001
sum = math.Round(sum*1000) / 1000
return priceInfo.CpuPerCorePerHour * sum
CpuPerCorePerHour := new(big.Float).SetFloat64(priceInfo.CpuPerCorePerHour)
tmp := s.Mul(s, CpuPerCorePerHour)
return fmt.Sprintf(generateFloatFormat(meteringCorePrecision), tmp)
case METER_RESOURCE_TYPE_MEM:
// unit: Gigabyte, precision: 0.1
sum = math.Round(sum/1073741824*10) / 10
return priceInfo.MemPerGigabytesPerHour * sum
oneGiga := new(big.Float).SetInt64(1073741824)
MemPerGigabytesPerHour := new(big.Float).SetFloat64(priceInfo.MemPerGigabytesPerHour)
// transform unit from bytes to Gigabytes
s.Quo(s, oneGiga)
return fmt.Sprintf(generateFloatFormat(meteringMemPrecision), s.Mul(s, MemPerGigabytesPerHour))
case METER_RESOURCE_TYPE_NET_INGRESS:
// unit: Megabyte, precision: 1
sum = math.Round(sum / 1048576)
return priceInfo.IngressNetworkTrafficPerMegabytesPerHour * sum
oneMega := new(big.Float).SetInt64(1048576)
IngressNetworkTrafficPerMegabytesPerHour := new(big.Float).SetFloat64(priceInfo.IngressNetworkTrafficPerMegabytesPerHour)
// transform unit from bytes to Migabytes
s.Quo(s, oneMega)
return fmt.Sprintf(generateFloatFormat(meteringIngressPrecision), s.Mul(s, IngressNetworkTrafficPerMegabytesPerHour))
case METER_RESOURCE_TYPE_NET_EGRESS:
// unit: Megabyte, precision: 1
sum = math.Round(sum / 1048576)
return priceInfo.EgressNetworkTrafficPerMegabytesPerHour * sum
oneMega := new(big.Float).SetInt64(1048576)
EgressNetworkTrafficPerMegabytesPerHour := new(big.Float).SetPrec(meteringEgressPrecision).SetFloat64(priceInfo.EgressNetworkTrafficPerMegabytesPerHour)
// transform unit from bytes to Migabytes
s.Quo(s, oneMega)
return fmt.Sprintf(generateFloatFormat(meteringEgressPrecision), s.Mul(s, EgressNetworkTrafficPerMegabytesPerHour))
case METER_RESOURCE_TYPE_PVC:
// unit: Gigabyte, precision: 0.1
sum = math.Round(sum/1073741824*10) / 10
return priceInfo.PvcPerGigabytesPerHour * sum
oneGiga := new(big.Float).SetInt64(1073741824)
PvcPerGigabytesPerHour := new(big.Float).SetPrec(meteringPvcPrecision).SetFloat64(priceInfo.PvcPerGigabytesPerHour)
// transform unit from bytes to Gigabytes
s.Quo(s, oneGiga)
return fmt.Sprintf(generateFloatFormat(meteringPvcPrecision), s.Mul(s, PvcPerGigabytesPerHour))
}
return -1
return ""
}
}
@@ -224,9 +270,9 @@ func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float6
metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)
metricData.MetricValues[index].AvgValue = getAvgPointValue(metricValue.Series)
} else {
metricData.MetricValues[index].MinValue = (*metricValue.Sample)[1]
metricData.MetricValues[index].MaxValue = (*metricValue.Sample)[1]
metricData.MetricValues[index].AvgValue = (*metricValue.Sample)[1]
metricData.MetricValues[index].MinValue = getMinPointValue([]monitoring.Point{*metricValue.Sample})
metricData.MetricValues[index].MaxValue = getMaxPointValue([]monitoring.Point{*metricValue.Sample})
metricData.MetricValues[index].AvgValue = getAvgPointValue([]monitoring.Point{*metricValue.Sample})
}
// squash points if step is more than one hour and calculate sum and fee
@@ -241,7 +287,7 @@ func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float6
metricData.MetricValues[index].SumValue = sum
metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
} else {
sum := (*metricValue.Sample)[1]
sum := getSumPointValue([]monitoring.Point{*metricValue.Sample})
metricData.MetricValues[index].SumValue = sum
metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
}

View File

@@ -15,6 +15,7 @@ import (
appv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/klog"
@@ -25,6 +26,8 @@ import (
"kubesphere.io/kubesphere/pkg/apiserver/query"
"kubesphere.io/kubesphere/pkg/apiserver/request"
monitoringmodel "kubesphere.io/kubesphere/pkg/models/monitoring"
"kubesphere.io/kubesphere/pkg/models/openpitrix"
"kubesphere.io/kubesphere/pkg/server/params"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
)
@@ -691,7 +694,12 @@ func (t *tenantOperator) transformMetricData(metrics monitoringmodel.Metrics) me
for _, metricValue := range metric.MetricValues {
//metricValue.SumValue
podName := metricValue.Metadata["pod"]
podsStats.Set(podName, metricName, metricValue.SumValue)
if s, err := strconv.ParseFloat(metricValue.SumValue, 64); err != nil {
klog.Error("failed to parse string to float64")
return nil
} else {
podsStats.Set(podName, metricName, s)
}
}
}
@@ -769,9 +777,15 @@ func (t *tenantOperator) updateDeploysStats(user user.Info, cluster, ns string,
return err
}
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "deployment", deploy.Name); ok {
// TODO: for op deployment
continue
if ok, opName := t.isOpenPitrixComponent(cluster, ns, "deployment", deploy.Name); ok {
// for op deployment
for _, pod := range pods {
podsStat := podsStats[pod]
if err := resourceStats.GetOpenPitrixStats(opName).GetDeployStats(deploy.Name).SetPodStats(pod, podsStat); err != nil {
klog.Error(err)
return err
}
}
} else if ok, appName := t.isAppComponent(ns, "deployment", deploy.Name); ok {
// for app deployment
for _, pod := range pods {
@@ -798,7 +812,7 @@ func (t *tenantOperator) updateDeploysStats(user user.Info, cluster, ns string,
}
}
// TODO: op aggregate for deployment components
// OpenPitrix aggregate for deployment components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}
@@ -833,9 +847,14 @@ func (t *tenantOperator) updateDaemonsetsStats(user user.Info, cluster, ns strin
return err
}
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "daemonset", daemonset.Name); ok {
// TODO: for op daemonset
continue
if ok, opName := t.isOpenPitrixComponent(cluster, ns, "daemonset", daemonset.Name); ok {
// for op daemonset
for _, pod := range pods {
if err := resourceStats.GetOpenPitrixStats(opName).GetDaemonStats(daemonset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
} else if ok, appName := t.isAppComponent(ns, "daemonset", daemonset.Name); ok {
// for app daemonset
for _, pod := range pods {
@@ -860,7 +879,7 @@ func (t *tenantOperator) updateDaemonsetsStats(user user.Info, cluster, ns strin
// here pod stats and level struct are ready
// TODO: op aggregate for daemonset components
// OpenPitrix aggregate for daemonset components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}
@@ -877,8 +896,71 @@ func (t *tenantOperator) updateDaemonsetsStats(user user.Info, cluster, ns strin
return nil
}
// TODO: include op metering part
// collectOpenPitrixComponents lists every OpenPitrix application release in
// cluster/ns and maps "<op cluster id>:<Kind>" to the names of the workload
// and service objects that release owns. Returns nil when listing or
// describing applications fails.
func (t *tenantOperator) collectOpenPitrixComponents(cluster, ns string) map[string][]string {
	var opComponentsMap = make(map[string][]string)
	var ops []string

	conditions := params.Conditions{
		Match: make(map[string]string),
		Fuzzy: make(map[string]string),
	}

	// first call only discovers the total count; second call fetches all items
	resp, err := t.opRelease.ListApplications("", cluster, ns, &conditions, 10, 0, "", false)
	if err != nil {
		klog.Error("failed to list op apps")
		return nil
	}
	totalCount := resp.TotalCount
	resp, err = t.opRelease.ListApplications("", cluster, ns, &conditions, totalCount, 0, "", false)
	if err != nil {
		klog.Error("failed to list op apps")
		return nil
	}
	for _, item := range resp.Items {
		app := item.(*openpitrix.Application)
		ops = append(ops, app.Cluster.ClusterId)
	}

	for _, op := range ops {
		app, err := t.opRelease.DescribeApplication("", cluster, ns, op)
		if err != nil {
			klog.Error(err)
			return nil
		}

		for _, object := range app.ReleaseInfo {
			unstructuredObj := object.(*unstructured.Unstructured)
			// Normalize the Kubernetes kind ("DaemonSet" -> "Daemonset") so the
			// key matches isOpenPitrixComponent's strings.Title(kind) lookup;
			// the previous literal comparison against "Daemonset"/"Statefulset"
			// could never match real CamelCase kinds.
			kind := strings.Title(strings.ToLower(unstructuredObj.GetKind()))
			switch kind {
			case "Service", "Deployment", "Daemonset", "Statefulset":
				// read and write the SAME key; the old code appended to the
				// slice stored under the bare kind while writing op+":"+kind,
				// cross-linking component lists between applications.
				key := op + ":" + kind
				opComponentsMap[key] = append(opComponentsMap[key], unstructuredObj.GetName())
			}
		}
	}

	return opComponentsMap
}
// isOpenPitrixComponent reports whether componentName is a component of the
// given kind ("deployment", "daemonset", ...) belonging to an OpenPitrix
// application in cluster/ns. On a match it also returns the OpenPitrix
// cluster id that owns the component.
func (t *tenantOperator) isOpenPitrixComponent(cluster, ns, kind, componentName string) (bool, string) {
	wantKind := strings.Title(kind)
	for k, components := range t.collectOpenPitrixComponents(cluster, ns) {
		// keys have the shape "<op cluster id>:<Kind>"
		kk := strings.Split(k, ":")
		if len(kk) != 2 {
			// skip a malformed key instead of aborting the whole lookup;
			// the old code returned (false, "") here, hiding valid matches
			// behind one bad entry.
			klog.Errorf("invalid op key %s", k)
			continue
		}
		if kk[1] != wantKind {
			continue
		}
		for _, name := range components {
			if name == componentName {
				return true, kk[0]
			}
		}
	}
	return false, ""
}
@@ -916,9 +998,14 @@ func (t *tenantOperator) updateStatefulsetsStats(user user.Info, cluster, ns str
return err
}
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "statefulset", statefulset.Name); ok {
// TODO: for op statefulset
continue
if ok, opName := t.isOpenPitrixComponent(cluster, ns, "statefulset", statefulset.Name); ok {
// for op statefulset
for _, pod := range pods {
if err := resourceStats.GetOpenPitrixStats(opName).GetStatefulsetStats(statefulset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
} else if ok, appName := t.isAppComponent(ns, "daemonset", statefulset.Name); ok {
// for app statefulset
for _, pod := range pods {
@@ -943,7 +1030,7 @@ func (t *tenantOperator) updateStatefulsetsStats(user user.Info, cluster, ns str
}
}
// TODO: op aggregate for statefulset components
// OpenPitrix aggregate for statefulset components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}

View File

@@ -998,7 +998,7 @@ func (t *tenantOperator) MeteringHierarchy(user user.Info, queryParam *meteringv
podsStats := t.transformMetricData(res)
// classify pods stats
resourceStats, err := t.classifyPodStats(user, "", queryParam.NamespaceName, podsStats)
resourceStats, err := t.classifyPodStats(user, queryParam.Cluster, queryParam.NamespaceName, podsStats)
if err != nil {
klog.Error(err)
return metering.ResourceStatistic{}, err