Integrate metering module and support metering data CSV format export.

Signed-off-by: Rao Yunkun <yunkunrao@yunify.com>
This commit is contained in:
Rao Yunkun
2021-03-25 10:01:38 +08:00
parent ac275b6e98
commit d08e402384
23 changed files with 664 additions and 350 deletions

View File

@@ -1,12 +1,17 @@
package metering
type PriceInfo struct {
Currency string `json:"currency" description:"currency"`
CpuPerCorePerHour float64 `json:"cpu_per_core_per_hour,omitempty" description:"cpu price"`
MemPerGigabytesPerHour float64 `json:"mem_per_gigabytes_per_hour,omitempty" description:"mem price"`
IngressNetworkTrafficPerGiagabytesPerHour float64 `json:"ingress_network_traffic_per_giagabytes_per_hour,omitempty" description:"ingress price"`
EgressNetworkTrafficPerGiagabytesPerHour float64 `json:"egress_network_traffic_per_gigabytes_per_hour,omitempty" description:"egress price"`
PvcPerGigabytesPerHour float64 `json:"pvc_per_gigabytes_per_hour,omitempty" description:"pvc price"`
Currency string `json:"currency" description:"currency"`
CpuPerCorePerHour float64 `json:"cpu_per_core_per_hour,omitempty" description:"cpu price"`
MemPerGigabytesPerHour float64 `json:"mem_per_gigabytes_per_hour,omitempty" description:"mem price"`
IngressNetworkTrafficPerMegabytesPerHour float64 `json:"ingress_network_traffic_per_megabytes_per_hour,omitempty" description:"ingress price"`
EgressNetworkTrafficPerMegabytesPerHour float64 `json:"egress_network_traffic_per_megabytes_per_hour,omitempty" description:"egress price"`
PvcPerGigabytesPerHour float64 `json:"pvc_per_gigabytes_per_hour,omitempty" description:"pvc price"`
}
type PriceResponse struct {
RetentionDay string `json:"retention_day"`
PriceInfo `json:",inline"`
}
// currently init method fill illegal value to hint that metering config file was not mounted yet
@@ -14,8 +19,8 @@ func (p *PriceInfo) Init() {
p.Currency = ""
p.CpuPerCorePerHour = -1
p.MemPerGigabytesPerHour = -1
p.IngressNetworkTrafficPerGiagabytesPerHour = -1
p.EgressNetworkTrafficPerGiagabytesPerHour = -1
p.IngressNetworkTrafficPerMegabytesPerHour = -1
p.EgressNetworkTrafficPerMegabytesPerHour = -1
p.PvcPerGigabytesPerHour = -1
}
@@ -47,46 +52,105 @@ func (ps *PodsStats) Set(podName, meterName string, value float64) {
}
}
type AppStatistic struct {
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
Services map[string]*ServiceStatistic `json:"services" description:"services"`
type OpenPitrixStatistic struct {
AppStatistic
}
func (as *AppStatistic) GetServiceStats(name string) *ServiceStatistic {
if as.Services == nil {
as.Services = make(map[string]*ServiceStatistic)
type AppStatistic struct {
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
Deploys map[string]*DeploymentStatistic `json:"deployments" description:"deployment statistic"`
Statefulsets map[string]*StatefulsetStatistic `json:"statefulsets" description:"statefulset statistic"`
Daemonsets map[string]*DaemonsetStatistic `json:"daemonsets" description:"daemonsets statistics"`
}
func (as *AppStatistic) GetDeployStats(name string) *DeploymentStatistic {
if as.Deploys == nil {
as.Deploys = make(map[string]*DeploymentStatistic)
}
if as.Services[name] == nil {
as.Services[name] = &ServiceStatistic{}
if as.Deploys[name] == nil {
as.Deploys[name] = &DeploymentStatistic{}
}
return as.Services[name]
return as.Deploys[name]
}
func (as *AppStatistic) GetDaemonStats(name string) *DaemonsetStatistic {
if as.Daemonsets == nil {
as.Daemonsets = make(map[string]*DaemonsetStatistic)
}
if as.Daemonsets[name] == nil {
as.Daemonsets[name] = &DaemonsetStatistic{}
}
return as.Daemonsets[name]
}
func (as *AppStatistic) GetStatefulsetStats(name string) *StatefulsetStatistic {
if as.Statefulsets == nil {
as.Statefulsets = make(map[string]*StatefulsetStatistic)
}
if as.Statefulsets[name] == nil {
as.Statefulsets[name] = &StatefulsetStatistic{}
}
return as.Statefulsets[name]
}
func (as *AppStatistic) Aggregate() {
if as.Services == nil {
if as.Deploys == nil && as.Statefulsets == nil && as.Daemonsets == nil {
return
}
// remove duplicate pods which were selected by different svc
podsMap := make(map[string]struct{})
for _, svcObj := range as.Services {
for podName, podObj := range svcObj.Pods {
if _, ok := podsMap[podName]; ok {
continue
} else {
podsMap[podName] = struct{}{}
}
as.CPUUsage += podObj.CPUUsage
as.MemoryUsageWoCache += podObj.MemoryUsageWoCache
as.NetBytesTransmitted += podObj.NetBytesTransmitted
as.NetBytesReceived += podObj.NetBytesReceived
as.PVCBytesTotal += podObj.PVCBytesTotal
// aggregate deployment stats
for _, deployObj := range as.Deploys {
for _, podObj := range deployObj.Pods {
deployObj.CPUUsage += podObj.CPUUsage
deployObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
deployObj.NetBytesTransmitted += podObj.NetBytesTransmitted
deployObj.NetBytesReceived += podObj.NetBytesReceived
deployObj.PVCBytesTotal += podObj.PVCBytesTotal
}
as.CPUUsage += deployObj.CPUUsage
as.MemoryUsageWoCache += deployObj.MemoryUsageWoCache
as.NetBytesTransmitted += deployObj.NetBytesTransmitted
as.NetBytesReceived += deployObj.NetBytesReceived
as.PVCBytesTotal += deployObj.PVCBytesTotal
}
// aggregate statefulset stats
for _, statfulObj := range as.Statefulsets {
for _, podObj := range statfulObj.Pods {
statfulObj.CPUUsage += podObj.CPUUsage
statfulObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
statfulObj.NetBytesTransmitted += podObj.NetBytesTransmitted
statfulObj.NetBytesReceived += podObj.NetBytesReceived
statfulObj.PVCBytesTotal += podObj.PVCBytesTotal
}
as.CPUUsage += statfulObj.CPUUsage
as.MemoryUsageWoCache += statfulObj.MemoryUsageWoCache
as.NetBytesTransmitted += statfulObj.NetBytesTransmitted
as.NetBytesReceived += statfulObj.NetBytesReceived
as.PVCBytesTotal += statfulObj.PVCBytesTotal
}
// aggregate daemonset stats
for _, daemonsetObj := range as.Daemonsets {
for _, podObj := range daemonsetObj.Pods {
daemonsetObj.CPUUsage += podObj.CPUUsage
daemonsetObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
daemonsetObj.NetBytesTransmitted += podObj.NetBytesTransmitted
daemonsetObj.NetBytesReceived += podObj.NetBytesReceived
daemonsetObj.PVCBytesTotal += podObj.PVCBytesTotal
}
as.CPUUsage += daemonsetObj.CPUUsage
as.MemoryUsageWoCache += daemonsetObj.MemoryUsageWoCache
as.NetBytesTransmitted += daemonsetObj.NetBytesTransmitted
as.NetBytesReceived += daemonsetObj.NetBytesReceived
as.PVCBytesTotal += daemonsetObj.PVCBytesTotal
}
return
}
type ServiceStatistic struct {
@@ -251,13 +315,28 @@ func (ds *DaemonsetStatistic) Aggregate() {
}
type ResourceStatistic struct {
Apps map[string]*AppStatistic `json:"apps" description:"app statistic"`
Services map[string]*ServiceStatistic `json:"services" description:"service statistic"`
// openpitrix statistic
OpenPitrixs map[string]*OpenPitrixStatistic `json:"openpitrixs" description:"openpitrix statistic"`
// app crd statistic
Apps map[string]*AppStatistic `json:"apps" description:"app statistic"`
// k8s workload only which exclude app and op
Deploys map[string]*DeploymentStatistic `json:"deployments" description:"deployment statistic"`
Statefulsets map[string]*StatefulsetStatistic `json:"statefulsets" description:"statefulset statistic"`
Daemonsets map[string]*DaemonsetStatistic `json:"daemonsets" description:"daemonsets statistics"`
}
func (rs *ResourceStatistic) GetOpenPitrixStats(name string) *OpenPitrixStatistic {
if rs.OpenPitrixs == nil {
rs.OpenPitrixs = make(map[string]*OpenPitrixStatistic)
}
if rs.OpenPitrixs[name] == nil {
rs.OpenPitrixs[name] = &OpenPitrixStatistic{}
}
return rs.OpenPitrixs[name]
}
func (rs *ResourceStatistic) GetAppStats(name string) *AppStatistic {
if rs.Apps == nil {
rs.Apps = make(map[string]*AppStatistic)
@@ -268,16 +347,6 @@ func (rs *ResourceStatistic) GetAppStats(name string) *AppStatistic {
return rs.Apps[name]
}
func (rs *ResourceStatistic) GetServiceStats(name string) *ServiceStatistic {
if rs.Services == nil {
rs.Services = make(map[string]*ServiceStatistic)
}
if rs.Services[name] == nil {
rs.Services[name] = &ServiceStatistic{}
}
return rs.Services[name]
}
func (rs *ResourceStatistic) GetDeployStats(name string) *DeploymentStatistic {
if rs.Deploys == nil {
rs.Deploys = make(map[string]*DeploymentStatistic)

View File

@@ -59,7 +59,7 @@ type MonitoringOperator interface {
// meter
GetNamedMetersOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) (Metrics, error)
GetNamedMeters(metrics []string, time time.Time, opt monitoring.QueryOption) (Metrics, error)
GetAppComponentsMap(ns string, apps []string) map[string][]string
GetAppWorkloads(ns string, apps []string) map[string][]string
GetSerivePodsMap(ns string, services []string) map[string][]string
}
@@ -432,10 +432,10 @@ func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end
}
// query time range: (start, end], so here we need to exclude start itself.
if start.Add(step).After(end) {
if start.Add(time.Hour).After(end) {
start = end
} else {
start = start.Add(step)
start = start.Add(time.Hour)
}
var opts []monitoring.QueryOption
@@ -444,10 +444,10 @@ func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end
opts = append(opts, monitoring.MeterOption{
Start: start,
End: end,
Step: step,
Step: time.Hour,
})
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, step, opts)
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, time.Hour, opts)
sMap := generateScalingFactorMap(step)
for i, _ := range ress {
@@ -471,7 +471,7 @@ func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt
return metersPerHour, nil
}
func (mo monitoringOperator) GetAppComponentsMap(ns string, apps []string) map[string][]string {
func (mo monitoringOperator) GetAppWorkloads(ns string, apps []string) map[string][]string {
componentsMap := make(map[string][]string)
applicationList := []*appv1beta1.Application{}
@@ -584,7 +584,7 @@ func (mo monitoringOperator) GetSerivePodsMap(ns string, services []string) map[
svcSelector := svcObj.Spec.Selector
if len(svcSelector) == 0 {
return svcPodsMap
continue
}
svcLabels := labels.Set{}

View File

@@ -71,12 +71,12 @@ var MeterResourceMap = map[string]int{
}
type PriceInfo struct {
CpuPerCorePerHour float64 `json:"cpuPerCorePerHour" yaml:"cpuPerCorePerHour"`
MemPerGigabytesPerHour float64 `json:"memPerGigabytesPerHour" yaml:"memPerGigabytesPerHour"`
IngressNetworkTrafficPerGiagabytesPerHour float64 `json:"ingressNetworkTrafficPerGiagabytesPerHour" yaml:"ingressNetworkTrafficPerGiagabytesPerHour"`
EgressNetworkTrafficPerGigabytesPerHour float64 `json:"egressNetworkTrafficPerGigabytesPerHour" yaml:"egressNetworkTrafficPerGigabytesPerHour"`
PvcPerGigabytesPerHour float64 `json:"pvcPerGigabytesPerHour" yaml:"pvcPerGigabytesPerHour"`
CurrencyUnit string `json:"currencyUnit" yaml:"currencyUnit"`
CpuPerCorePerHour float64 `json:"cpuPerCorePerHour" yaml:"cpuPerCorePerHour"`
MemPerGigabytesPerHour float64 `json:"memPerGigabytesPerHour" yaml:"memPerGigabytesPerHour"`
IngressNetworkTrafficPerMegabytesPerHour float64 `json:"ingressNetworkTrafficPerMegabytesPerHour" yaml:"ingressNetworkTrafficPerGiagabytesPerHour"`
EgressNetworkTrafficPerMegabytesPerHour float64 `json:"egressNetworkTrafficPerMegabytesPerHour" yaml:"egressNetworkTrafficPerGigabytesPerHour"`
PvcPerGigabytesPerHour float64 `json:"pvcPerGigabytesPerHour" yaml:"pvcPerGigabytesPerHour"`
CurrencyUnit string `json:"currencyUnit" yaml:"currencyUnit"`
}
type Billing struct {
@@ -84,7 +84,8 @@ type Billing struct {
}
type MeterConfig struct {
Billing Billing `json:"billing" yaml:"billing"`
RetentionDay string `json:"retentionDay" yaml:"retentionDay"`
Billing Billing `json:"billing" yaml:"billing"`
}
func (mc MeterConfig) GetPriceInfo() PriceInfo {
@@ -197,11 +198,11 @@ func getFeeWithMeterName(meterName string, sum float64) float64 {
case METER_RESOURCE_TYPE_NET_INGRESS:
// unit: Megabyte, precision: 1
sum = math.Round(sum / 1048576)
return priceInfo.IngressNetworkTrafficPerGiagabytesPerHour * sum
return priceInfo.IngressNetworkTrafficPerMegabytesPerHour * sum
case METER_RESOURCE_TYPE_NET_EGRESS:
// unit: Megabyte, precision:
// unit: Megabyte, precision: 1
sum = math.Round(sum / 1048576)
return priceInfo.EgressNetworkTrafficPerGigabytesPerHour * sum
return priceInfo.EgressNetworkTrafficPerMegabytesPerHour * sum
case METER_RESOURCE_TYPE_PVC:
// unit: Gigabyte, precision: 0.1
sum = math.Round(sum/1073741824*10) / 10
@@ -217,37 +218,56 @@ func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float6
metricData := metric.MetricData
for index, metricValue := range metricData.MetricValues {
var points []monitoring.Point
// calculate min, max, avg value first, then squash points with factor
if metricData.MetricType == monitoring.MetricTypeMatrix {
points = metricValue.Series
metricData.MetricValues[index].MinValue = getMinPointValue(metricValue.Series)
metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)
metricData.MetricValues[index].AvgValue = getAvgPointValue(metricValue.Series)
} else {
points = append(points, *metricValue.Sample)
metricData.MetricValues[index].MinValue = (*metricValue.Sample)[1]
metricData.MetricValues[index].MaxValue = (*metricValue.Sample)[1]
metricData.MetricValues[index].AvgValue = (*metricValue.Sample)[1]
}
// squash points if step is more than one hour and calculate sum and fee
var factor float64 = 1
if scalingMap != nil {
factor = scalingMap[metricName]
}
metricData.MetricValues[index].Series = squashPoints(metricData.MetricValues[index].Series, int(factor))
if len(points) == 1 {
sample := points[0]
sum := sample[1] * factor
metricData.MetricValues[index].MinValue = sample[1]
metricData.MetricValues[index].MaxValue = sample[1]
metricData.MetricValues[index].AvgValue = sample[1]
if metricData.MetricType == monitoring.MetricTypeMatrix {
sum := getSumPointValue(metricData.MetricValues[index].Series)
metricData.MetricValues[index].SumValue = sum
metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
} else {
sum := getSumPointValue(points) * factor
metricData.MetricValues[index].MinValue = getMinPointValue(points)
metricData.MetricValues[index].MaxValue = getMaxPointValue(points)
metricData.MetricValues[index].AvgValue = getAvgPointValue(points)
sum := (*metricValue.Sample)[1]
metricData.MetricValues[index].SumValue = sum
metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
}
metricData.MetricValues[index].CurrencyUnit = getCurrencyUnit()
metricData.MetricValues[index].ResourceUnit = getResourceUnit(metricName)
}
return metricData
}
func squashPoints(input []monitoring.Point, factor int) (output []monitoring.Point) {
if factor <= 0 {
klog.Errorln("factor should be positive")
return nil
}
for i := 0; i < len(input); i++ {
if i%factor == 0 {
output = append([]monitoring.Point{input[len(input)-1-i]}, output...)
} else {
output[0] = output[0].Add(input[len(input)-1-i])
}
}
return output
}

View File

@@ -598,7 +598,7 @@ func (t *tenantOperator) processApplicationMetersQuery(meters []string, q QueryO
klog.Error(err.Error())
return
}
componentsMap := t.mo.GetAppComponentsMap(aso.NamespaceName, aso.Applications)
componentsMap := t.mo.GetAppWorkloads(aso.NamespaceName, aso.Applications)
for k, _ := range componentsMap {
opt := monitoring.ApplicationOption{
@@ -698,90 +698,24 @@ func (t *tenantOperator) transformMetricData(metrics monitoringmodel.Metrics) me
return podsStats
}
func (t *tenantOperator) classifyPodStats(user user.Info, ns string, podsStats metering.PodsStats) (resourceStats metering.ResourceStatistic, err error) {
if err = t.updateServicesStats(user, ns, podsStats, &resourceStats); err != nil {
func (t *tenantOperator) classifyPodStats(user user.Info, cluster, ns string, podsStats metering.PodsStats) (resourceStats metering.ResourceStatistic, err error) {
// classify pod stats into the following 3 levels under the specified namespace and user info
// 1. project -> workload(deploy, sts, ds) -> pod
// 2. project -> app -> workload(deploy, sts, ds) -> pod
// 3. project -> op -> workload(deploy, sts, ds) -> pod
if err = t.updateDeploysStats(user, cluster, ns, podsStats, &resourceStats); err != nil {
return
}
if err = t.updateDeploysStats(user, ns, podsStats, &resourceStats); err != nil {
if err = t.updateDaemonsetsStats(user, cluster, ns, podsStats, &resourceStats); err != nil {
return
}
if err = t.updateDaemonsetsStats(user, ns, podsStats, &resourceStats); err != nil {
return
}
if err = t.updateStatefulsetsStats(user, ns, podsStats, &resourceStats); err != nil {
if err = t.updateStatefulsetsStats(user, cluster, ns, podsStats, &resourceStats); err != nil {
return
}
return
}
func (t *tenantOperator) updateServicesStats(user user.Info, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
svcList, err := t.listServices(user, ns)
if err != nil {
return err
}
for _, svc := range svcList.Items {
if svc.Annotations[constants.ApplicationReleaseName] != "" &&
svc.Annotations[constants.ApplicationReleaseNS] != "" &&
t.isOpNamespace(ns) {
// for op svc
// currently we do NOT include op svc
continue
} else {
appName, nameOK := svc.Labels[constants.ApplicationName]
appVersion, versionOK := svc.Labels[constants.ApplicationVersion]
svcPodsMap := t.mo.GetSerivePodsMap(ns, []string{svc.Name})
pods := svcPodsMap[svc.Name]
if nameOK && versionOK {
// for app crd svc
for _, pod := range pods {
podStat := podsStats[pod]
if podStat == nil {
klog.Warningf("%v not found", pod)
continue
}
appFullName := appName + ":" + appVersion
if err := resourceStats.GetAppStats(appFullName).GetServiceStats(svc.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
} else {
// for k8s svc
for _, pod := range pods {
if err := resourceStats.GetServiceStats(svc.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
}
}
}
// aggregate svc data
for _, app := range resourceStats.Apps {
for _, svc := range app.Services {
svc.Aggregate()
}
app.Aggregate()
}
for _, svc := range resourceStats.Services {
svc.Aggregate()
}
return nil
}
func (t *tenantOperator) listServices(user user.Info, ns string) (*corev1.ServiceList, error) {
svcScope := request.NamespaceScope
@@ -817,7 +751,11 @@ func (t *tenantOperator) listServices(user user.Info, ns string) (*corev1.Servic
return svcs, nil
}
func (t *tenantOperator) updateDeploysStats(user user.Info, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
// updateDeploysStats will update deployment field in resource stats struct with pod stats data and deployments will be classified into 3 classes:
// 1. openpitrix deployments
// 2. app deployments
// 3. k8s deployments
func (t *tenantOperator) updateDeploysStats(user user.Info, cluster, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
deployList, err := t.listDeploys(user, ns)
if err != nil {
return err
@@ -825,44 +763,63 @@ func (t *tenantOperator) updateDeploysStats(user user.Info, ns string, podsStats
for _, deploy := range deployList.Items {
if deploy.Annotations[constants.ApplicationReleaseName] != "" &&
deploy.Annotations[constants.ApplicationReleaseNS] != "" &&
t.isOpNamespace(ns) {
// for op deploy
// currently we do NOT include op deploy
continue
} else {
_, appNameOK := deploy.Labels[constants.ApplicationName]
_, appVersionOK := deploy.Labels[constants.ApplicationVersion]
pods, err := t.listPods(user, ns, deploy.Spec.Selector)
if err != nil {
klog.Error(err)
return err
}
pods, err := t.listPods(user, ns, deploy.Spec.Selector)
if err != nil {
klog.Error(err)
return err
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "deployment", deploy.Name); ok {
// TODO: for op deployment
continue
} else if ok, appName := t.isAppComponent(ns, "deployment", deploy.Name); ok {
// for app deployment
for _, pod := range pods {
podsStat := podsStats[pod]
if podsStat == nil {
klog.Warningf("%v not found", pod)
continue
}
if err := resourceStats.GetAppStats(appName).GetDeployStats(deploy.Name).SetPodStats(pod, podsStat); err != nil {
klog.Error(err)
return err
}
}
if appNameOK && appVersionOK {
// for app crd svc
continue
} else {
// for k8s svc
for _, pod := range pods {
if err := resourceStats.GetDeployStats(deploy.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
} else {
// for k8s deployment only
for _, pod := range pods {
if err := resourceStats.GetDeployStats(deploy.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
}
}
// TODO: op aggregate for deployment components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}
// app aggregate for deployment components
for _, app := range resourceStats.Apps {
app.Aggregate()
}
// k8s aggregate for deployment components
for _, deploy := range resourceStats.Deploys {
deploy.Aggregate()
}
return nil
}
func (t *tenantOperator) updateDaemonsetsStats(user user.Info, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
// updateDaemonsetsStats will update daemonsets field in resource stats struct with pod stats data and daemonsets will be classified into 3 classes:
// 1. openpitrix daemonsets
// 2. app daemonsets
// 3. k8s daemonsets
func (t *tenantOperator) updateDaemonsetsStats(user user.Info, cluster, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
daemonsetList, err := t.listDaemonsets(user, ns)
if err != nil {
return err
@@ -870,59 +827,81 @@ func (t *tenantOperator) updateDaemonsetsStats(user user.Info, ns string, podsSt
for _, daemonset := range daemonsetList.Items {
if daemonset.Annotations["meta.helm.sh/release-name"] != "" &&
daemonset.Annotations["meta.helm.sh/release-namespace"] != "" &&
t.isOpNamespace(ns) {
// for op deploy
// currently we do NOT include op deploy
pods, err := t.listPods(user, ns, daemonset.Spec.Selector)
if err != nil {
klog.Error(err)
return err
}
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "daemonset", daemonset.Name); ok {
// TODO: for op daemonset
continue
} else {
appName := daemonset.Labels[constants.ApplicationName]
appVersion := daemonset.Labels[constants.ApplicationVersion]
pods, err := t.listPods(user, ns, daemonset.Spec.Selector)
if err != nil {
klog.Error(err)
return err
} else if ok, appName := t.isAppComponent(ns, "daemonset", daemonset.Name); ok {
// for app daemonset
for _, pod := range pods {
// aggregate order is from bottom (pods) to top (app); we should create the outer field if it does not exist
// and then set pod stats data, in the following direction:
// app field (create if not existed) -> daemonsets field (create if not existed) -> pod
if err := resourceStats.GetAppStats(appName).GetDaemonStats(daemonset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
if appName != "" && appVersion != "" {
// for app crd svc
continue
} else {
// for k8s svc
for _, pod := range pods {
if err := resourceStats.GetDaemonsetStats(daemonset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
} else {
// for k8s daemonset
for _, pod := range pods {
if err := resourceStats.GetDaemonsetStats(daemonset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
}
}
// here pod stats and level struct are ready
// TODO: op aggregate for daemonset components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}
// app aggregate for daemonset components
for _, app := range resourceStats.Apps {
app.Aggregate()
}
// k8s aggregate for daemonset components
for _, daemonset := range resourceStats.Daemonsets {
daemonset.Aggregate()
}
return nil
}
func (t *tenantOperator) isOpNamespace(ns string) bool {
nsObj, err := t.k8sclient.CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
if err != nil {
return false
}
ws := nsObj.Labels[constants.WorkspaceLabelKey]
if len(ws) != 0 && ws != "system-workspace" {
return true
}
return false
// TODO: include op metering part
func (t *tenantOperator) isOpenPitrixComponent(cluster, ns, kind, componentName string) (bool, string) {
return false, ""
}
func (t *tenantOperator) updateStatefulsetsStats(user user.Info, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
func (t *tenantOperator) isAppComponent(ns, kind, componentName string) (bool, string) {
appWorkloads := t.mo.GetAppWorkloads(ns, nil)
for appName, cList := range appWorkloads {
for _, component := range cList {
if component == fmt.Sprintf("%s:%s", strings.Title(kind), componentName) {
return true, appName
}
}
}
return false, ""
}
// updateStatefulsetsStats will update statefulsets field in resource stats struct with pod stats data and statefulsets will be classified into 3 classes:
// 1. openpitrix statefulsets
// 2. app statefulsets
// 3. k8s statefulsets
func (t *tenantOperator) updateStatefulsetsStats(user user.Info, cluster, ns string, podsStats metering.PodsStats, resourceStats *metering.ResourceStatistic) error {
statefulsetsList, err := t.listStatefulsets(user, ns)
if err != nil {
return err
@@ -930,37 +909,51 @@ func (t *tenantOperator) updateStatefulsetsStats(user user.Info, ns string, pods
for _, statefulset := range statefulsetsList.Items {
if statefulset.Annotations[constants.ApplicationReleaseName] != "" &&
statefulset.Annotations[constants.ApplicationReleaseNS] != "" &&
t.isOpNamespace(ns) {
// for op deploy
// currently we do NOT include op deploy
// query pod list under the statefulset within the namespace
pods, err := t.listPods(user, ns, statefulset.Spec.Selector)
if err != nil {
klog.Error(err)
return err
}
if ok, _ := t.isOpenPitrixComponent(cluster, ns, "statefulset", statefulset.Name); ok {
// TODO: for op statefulset
continue
} else {
appName := statefulset.Labels[constants.ApplicationName]
appVersion := statefulset.Labels[constants.ApplicationVersion]
pods, err := t.listPods(user, ns, statefulset.Spec.Selector)
if err != nil {
klog.Error(err)
return err
} else if ok, appName := t.isAppComponent(ns, "daemonset", statefulset.Name); ok {
// for app statefulset
for _, pod := range pods {
// aggregate order is from bottom(pods) to top(app), we should create outer field if not exists
// and then set pod stats data, the direction is as follows:
// app field(create if not existed) -> statefulsets field(create if not existed) -> pod
if err := resourceStats.GetAppStats(appName).GetStatefulsetStats(statefulset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
if appName != "" && appVersion != "" {
// for app crd svc
continue
} else {
// for k8s svc
for _, pod := range pods {
if err := resourceStats.GetStatefulsetStats(statefulset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
} else {
// for k8s statefulset
for _, pod := range pods {
// same as above, the direction is similar:
// k8s field(create if not existed) -> statefulsets field(create if not existed) -> pod
if err := resourceStats.GetStatefulsetStats(statefulset.Name).SetPodStats(pod, podsStats[pod]); err != nil {
klog.Error(err)
return err
}
}
}
}
// TODO: op aggregate for statefulset components
for _, op := range resourceStats.OpenPitrixs {
op.Aggregate()
}
// app aggregate for statefulset components
for _, app := range resourceStats.Apps {
app.Aggregate()
}
// k8s aggregate for statefulset components
for _, statefulset := range resourceStats.Statefulsets {
statefulset.Aggregate()
}
@@ -1048,6 +1041,16 @@ func (t *tenantOperator) listDeploys(user user.Info, ns string) (*appv1.Deployme
return deploys, nil
}
func (t *tenantOperator) getAppNameFromLabels(labels map[string]string) string {
appName := labels[constants.ApplicationName]
appVersion := labels[constants.ApplicationVersion]
if appName == "" || appVersion == "" {
return ""
}
return fmt.Sprintf("%s:%s", appName, appVersion)
}
func (t *tenantOperator) listDaemonsets(user user.Info, ns string) (*appv1.DaemonSetList, error) {
dsScope := request.NamespaceScope

View File

@@ -24,6 +24,8 @@ import (
"strings"
"time"
"kubesphere.io/kubesphere/pkg/models/openpitrix"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -104,9 +106,15 @@ type tenantOperator struct {
lo logging.LoggingOperator
auditing auditing.Interface
mo monitoring.MonitoringOperator
opRelease openpitrix.ReleaseInterface
}
func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ksclient kubesphere.Interface, evtsClient eventsclient.Client, loggingClient loggingclient.Client, auditingclient auditingclient.Client, am am.AccessManagementInterface, authorizer authorizer.Authorizer, monitoringclient monitoringclient.Interface, resourceGetter *resourcev1alpha3.ResourceGetter) Interface {
var openpitrixRelease openpitrix.ReleaseInterface
if ksclient != nil {
openpitrixRelease = openpitrix.NewOpenpitrixOperator(informers, ksclient, nil)
}
return &tenantOperator{
am: am,
authorizer: authorizer,
@@ -117,6 +125,7 @@ func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ks
lo: logging.NewLoggingOperator(loggingClient),
auditing: auditing.NewEventsOperator(auditingclient),
mo: monitoring.NewMonitoringOperator(monitoringclient, nil, k8sclient, informers, resourceGetter),
opRelease: openpitrixRelease,
}
}
@@ -989,7 +998,7 @@ func (t *tenantOperator) MeteringHierarchy(user user.Info, queryParam *meteringv
podsStats := t.transformMetricData(res)
// classify pods stats
resourceStats, err := t.classifyPodStats(user, queryParam.NamespaceName, podsStats)
resourceStats, err := t.classifyPodStats(user, "", queryParam.NamespaceName, podsStats)
if err != nil {
klog.Error(err)
return metering.ResourceStatistic{}, err