299
pkg/models/metering/type.go
Normal file
299
pkg/models/metering/type.go
Normal file
@@ -0,0 +1,299 @@
|
||||
package metering
|
||||
|
||||
// PriceInfo describes the hourly unit prices used when converting metered
// resource usage into a billing amount.
type PriceInfo struct {
	// Currency is the currency unit the prices below are expressed in.
	Currency string `json:"currency" description:"currency"`
	// CpuPerCorePerHour is the price for one CPU core for one hour.
	CpuPerCorePerHour float64 `json:"cpu_per_core_per_hour,omitempty" description:"cpu price"`
	// MemPerGigabytesPerHour is the price for one gigabyte of memory for one hour.
	MemPerGigabytesPerHour float64 `json:"mem_per_gigabytes_per_hour,omitempty" description:"mem price"`
	// NOTE(review): "Giagabytes"/"giagabytes" is a typo for "Gigabytes" in
	// both the field name and the json tag; fixing it would break callers
	// and the serialized wire format, so it is left as-is.
	IngressNetworkTrafficPerGiagabytesPerHour float64 `json:"ingress_network_traffic_per_giagabytes_per_hour,omitempty" description:"ingress price"`
	// NOTE(review): the field name carries the "Giagabytes" typo but this
	// json tag is spelled correctly ("gigabytes") — inconsistent with the
	// ingress field above; confirm which spelling consumers expect.
	EgressNetworkTrafficPerGiagabytesPerHour float64 `json:"egress_network_traffic_per_gigabytes_per_hour,omitempty" description:"egress price"`
	// PvcPerGigabytesPerHour is the price for one gigabyte of PVC storage for one hour.
	PvcPerGigabytesPerHour float64 `json:"pvc_per_gigabytes_per_hour,omitempty" description:"pvc price"`
}
|
||||
|
||||
type PodStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" desription:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
}
|
||||
|
||||
type PodsStats map[string]*PodStatistic
|
||||
|
||||
func (ps *PodsStats) Set(podName, meterName string, value float64) {
|
||||
if _, ok := (*ps)[podName]; !ok {
|
||||
(*ps)[podName] = &PodStatistic{}
|
||||
}
|
||||
switch meterName {
|
||||
case "meter_pod_cpu_usage":
|
||||
(*ps)[podName].CPUUsage = value
|
||||
case "meter_pod_memory_usage_wo_cache":
|
||||
(*ps)[podName].MemoryUsageWoCache = value
|
||||
case "meter_pod_net_bytes_transmitted":
|
||||
(*ps)[podName].NetBytesTransmitted = value
|
||||
case "meter_pod_net_bytes_received":
|
||||
(*ps)[podName].NetBytesReceived = value
|
||||
case "meter_pod_pvc_bytes_total":
|
||||
(*ps)[podName].PVCBytesTotal = value
|
||||
}
|
||||
}
|
||||
|
||||
type AppStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Services map[string]*ServiceStatistic `json:"services" description:"services"`
|
||||
}
|
||||
|
||||
func (as *AppStatistic) GetServiceStats(name string) *ServiceStatistic {
|
||||
if as.Services == nil {
|
||||
as.Services = make(map[string]*ServiceStatistic)
|
||||
}
|
||||
if as.Services[name] == nil {
|
||||
as.Services[name] = &ServiceStatistic{}
|
||||
}
|
||||
return as.Services[name]
|
||||
}
|
||||
|
||||
func (as *AppStatistic) Aggregate() {
|
||||
if as.Services == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// remove duplicate pods which were selected by different svc
|
||||
podsMap := make(map[string]struct{})
|
||||
for _, svcObj := range as.Services {
|
||||
for podName, podObj := range svcObj.Pods {
|
||||
if _, ok := podsMap[podName]; ok {
|
||||
continue
|
||||
} else {
|
||||
podsMap[podName] = struct{}{}
|
||||
}
|
||||
as.CPUUsage += podObj.CPUUsage
|
||||
as.MemoryUsageWoCache += podObj.MemoryUsageWoCache
|
||||
as.NetBytesTransmitted += podObj.NetBytesTransmitted
|
||||
as.NetBytesReceived += podObj.NetBytesReceived
|
||||
as.PVCBytesTotal += podObj.PVCBytesTotal
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ServiceStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" desription:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) SetPodStats(name string, podStat *PodStatistic) error {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ss.Pods[name] = podStat
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ss.Pods[name] == nil {
|
||||
ss.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ss.Pods[name]
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) Aggregate() {
|
||||
if ss.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ss.Pods {
|
||||
ss.CPUUsage += ss.GetPodStats(key).CPUUsage
|
||||
ss.MemoryUsageWoCache += ss.GetPodStats(key).MemoryUsageWoCache
|
||||
ss.NetBytesTransmitted += ss.GetPodStats(key).NetBytesTransmitted
|
||||
ss.NetBytesReceived += ss.GetPodStats(key).NetBytesReceived
|
||||
}
|
||||
}
|
||||
|
||||
type DeploymentStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" desciption:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ds.Pods[name] == nil {
|
||||
ds.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ds.Pods[name]
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) SetPodStats(name string, podStat *PodStatistic) error {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ds.Pods[name] = podStat
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) Aggregate() {
|
||||
if ds.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ds.Pods {
|
||||
ds.CPUUsage += ds.GetPodStats(key).CPUUsage
|
||||
ds.MemoryUsageWoCache += ds.GetPodStats(key).MemoryUsageWoCache
|
||||
ds.NetBytesTransmitted += ds.GetPodStats(key).NetBytesTransmitted
|
||||
ds.NetBytesReceived += ds.GetPodStats(key).NetBytesReceived
|
||||
ds.PVCBytesTotal += ds.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
type StatefulsetStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ss *StatefulsetStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ss.Pods[name] == nil {
|
||||
ss.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ss.Pods[name]
|
||||
}
|
||||
|
||||
func (ss *StatefulsetStatistic) SetPodStats(name string, podStat *PodStatistic) error {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ss.Pods[name] = podStat
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *StatefulsetStatistic) Aggregate() {
|
||||
if ss.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ss.Pods {
|
||||
ss.CPUUsage += ss.GetPodStats(key).CPUUsage
|
||||
ss.MemoryUsageWoCache += ss.GetPodStats(key).MemoryUsageWoCache
|
||||
ss.NetBytesTransmitted += ss.GetPodStats(key).NetBytesTransmitted
|
||||
ss.NetBytesReceived += ss.GetPodStats(key).NetBytesReceived
|
||||
ss.PVCBytesTotal += ss.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
type DaemonsetStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ds *DaemonsetStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ds.Pods[name] == nil {
|
||||
ds.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ds.Pods[name]
|
||||
}
|
||||
|
||||
func (ds *DaemonsetStatistic) SetPodStats(name string, podStat *PodStatistic) error {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ds.Pods[name] = podStat
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *DaemonsetStatistic) Aggregate() {
|
||||
if ds.Pods == nil {
|
||||
return
|
||||
}
|
||||
for key := range ds.Pods {
|
||||
ds.CPUUsage += ds.GetPodStats(key).CPUUsage
|
||||
ds.MemoryUsageWoCache += ds.GetPodStats(key).MemoryUsageWoCache
|
||||
ds.NetBytesTransmitted += ds.GetPodStats(key).NetBytesTransmitted
|
||||
ds.NetBytesReceived += ds.GetPodStats(key).NetBytesReceived
|
||||
ds.PVCBytesTotal += ds.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
type ResourceStatistic struct {
|
||||
Apps map[string]*AppStatistic `json:"apps" description:"app statistic"`
|
||||
Services map[string]*ServiceStatistic `json:"services" description:"service statistic"`
|
||||
Deploys map[string]*DeploymentStatistic `json:"deployments" description:"deployment statistic"`
|
||||
Statefulsets map[string]*StatefulsetStatistic `json:"statefulsets" description:"statefulset statistic"`
|
||||
Daemonsets map[string]*DaemonsetStatistic `json:"daemonsets" description:"daemonsets statistics"`
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetAppStats(name string) *AppStatistic {
|
||||
if rs.Apps == nil {
|
||||
rs.Apps = make(map[string]*AppStatistic)
|
||||
}
|
||||
if rs.Apps[name] == nil {
|
||||
rs.Apps[name] = &AppStatistic{}
|
||||
}
|
||||
return rs.Apps[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetServiceStats(name string) *ServiceStatistic {
|
||||
if rs.Services == nil {
|
||||
rs.Services = make(map[string]*ServiceStatistic)
|
||||
}
|
||||
if rs.Services[name] == nil {
|
||||
rs.Services[name] = &ServiceStatistic{}
|
||||
}
|
||||
return rs.Services[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetDeployStats(name string) *DeploymentStatistic {
|
||||
if rs.Deploys == nil {
|
||||
rs.Deploys = make(map[string]*DeploymentStatistic)
|
||||
}
|
||||
if rs.Deploys[name] == nil {
|
||||
rs.Deploys[name] = &DeploymentStatistic{}
|
||||
}
|
||||
return rs.Deploys[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetStatefulsetStats(name string) *StatefulsetStatistic {
|
||||
if rs.Statefulsets == nil {
|
||||
rs.Statefulsets = make(map[string]*StatefulsetStatistic)
|
||||
}
|
||||
if rs.Statefulsets[name] == nil {
|
||||
rs.Statefulsets[name] = &StatefulsetStatistic{}
|
||||
}
|
||||
return rs.Statefulsets[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetDaemonsetStats(name string) *DaemonsetStatistic {
|
||||
if rs.Daemonsets == nil {
|
||||
rs.Daemonsets = make(map[string]*DaemonsetStatistic)
|
||||
}
|
||||
if rs.Daemonsets[name] == nil {
|
||||
rs.Daemonsets[name] = &DaemonsetStatistic{}
|
||||
}
|
||||
return rs.Daemonsets[name]
|
||||
}
|
||||
@@ -18,6 +18,9 @@ package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -26,14 +29,19 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/monitoring/expressions"
|
||||
"kubesphere.io/kubesphere/pkg/models/openpitrix"
|
||||
resourcev1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
opclient "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
|
||||
"sigs.k8s.io/application/api/v1beta1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
)
|
||||
|
||||
type MonitoringOperator interface {
|
||||
@@ -47,23 +55,31 @@ type MonitoringOperator interface {
|
||||
// TODO: expose KubeSphere self metrics in Prometheus format
|
||||
GetKubeSphereStats() Metrics
|
||||
GetWorkspaceStats(workspace string) Metrics
|
||||
|
||||
// meter
|
||||
GetNamedMetersOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) (Metrics, error)
|
||||
GetNamedMeters(metrics []string, time time.Time, opt monitoring.QueryOption) (Metrics, error)
|
||||
GetAppComponentsMap(ns string, apps []string) map[string][]string
|
||||
GetSerivePodsMap(ns string, services []string) map[string][]string
|
||||
}
|
||||
|
||||
type monitoringOperator struct {
|
||||
prometheus monitoring.Interface
|
||||
metricsserver monitoring.Interface
|
||||
k8s kubernetes.Interface
|
||||
ks ksinformers.SharedInformerFactory
|
||||
op openpitrix.Interface
|
||||
prometheus monitoring.Interface
|
||||
metricsserver monitoring.Interface
|
||||
k8s kubernetes.Interface
|
||||
ks ksinformers.SharedInformerFactory
|
||||
op openpitrix.Interface
|
||||
resourceGetter *resourcev1alpha3.ResourceGetter
|
||||
}
|
||||
|
||||
func NewMonitoringOperator(monitoringClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client) MonitoringOperator {
|
||||
func NewMonitoringOperator(monitoringClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client, resourceGetter *resourcev1alpha3.ResourceGetter) MonitoringOperator {
|
||||
return &monitoringOperator{
|
||||
prometheus: monitoringClient,
|
||||
metricsserver: metricsClient,
|
||||
k8s: k8s,
|
||||
ks: factory.KubeSphereSharedInformerFactory(),
|
||||
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient),
|
||||
prometheus: monitoringClient,
|
||||
metricsserver: metricsClient,
|
||||
k8s: k8s,
|
||||
ks: factory.KubeSphereSharedInformerFactory(),
|
||||
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient),
|
||||
resourceGetter: resourceGetter,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,3 +370,225 @@ func (mo monitoringOperator) GetWorkspaceStats(workspace string) Metrics {
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
/*
|
||||
meter related methods
|
||||
*/
|
||||
|
||||
func (mo monitoringOperator) getNamedMetersWithHourInterval(meters []string, t time.Time, opt monitoring.QueryOption) Metrics {
|
||||
|
||||
var opts []monitoring.QueryOption
|
||||
|
||||
opts = append(opts, opt)
|
||||
opts = append(opts, monitoring.MeterOption{
|
||||
Step: 1 * time.Hour,
|
||||
})
|
||||
|
||||
ress := mo.prometheus.GetNamedMeters(meters, t, opts)
|
||||
|
||||
return Metrics{Results: ress}
|
||||
}
|
||||
|
||||
func generateScalingFactorMap(step time.Duration) map[string]float64 {
|
||||
scalingMap := make(map[string]float64)
|
||||
|
||||
for k := range MeterResourceMap {
|
||||
scalingMap[k] = step.Hours()
|
||||
}
|
||||
return scalingMap
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) (metrics Metrics, err error) {
|
||||
|
||||
if step.Hours() < 1 {
|
||||
klog.Warning("step should be longer than one hour")
|
||||
step = 1 * time.Hour
|
||||
}
|
||||
if end.Sub(start).Hours() > 30*24 {
|
||||
if step.Hours() < 24 {
|
||||
err = errors.New("step should be larger than 24 hours")
|
||||
return
|
||||
}
|
||||
}
|
||||
if math.Mod(step.Hours(), 1.0) > 0 {
|
||||
err = errors.New("step should be integer hours")
|
||||
return
|
||||
}
|
||||
|
||||
// query time range: (start, end], so here we need to exclude start itself.
|
||||
if start.Add(step).After(end) {
|
||||
start = end
|
||||
} else {
|
||||
start = start.Add(step)
|
||||
}
|
||||
|
||||
var opts []monitoring.QueryOption
|
||||
|
||||
opts = append(opts, opt)
|
||||
opts = append(opts, monitoring.MeterOption{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
})
|
||||
|
||||
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, step, opts)
|
||||
sMap := generateScalingFactorMap(step)
|
||||
|
||||
for i, _ := range ress {
|
||||
ress[i].MetricData = updateMetricStatData(ress[i], sMap)
|
||||
}
|
||||
|
||||
return Metrics{Results: ress}, nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt monitoring.QueryOption) (Metrics, error) {
|
||||
|
||||
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
|
||||
|
||||
for metricIndex, _ := range metersPerHour.Results {
|
||||
|
||||
res := metersPerHour.Results[metricIndex]
|
||||
|
||||
metersPerHour.Results[metricIndex].MetricData = updateMetricStatData(res, nil)
|
||||
}
|
||||
|
||||
return metersPerHour, nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetAppComponentsMap(ns string, apps []string) map[string][]string {
|
||||
|
||||
componentsMap := make(map[string][]string)
|
||||
applicationList := []*appv1beta1.Application{}
|
||||
|
||||
result, err := mo.resourceGetter.List("applications", ns, query.New())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, obj := range result.Items {
|
||||
app, ok := obj.(*appv1beta1.Application)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
applicationList = append(applicationList, app)
|
||||
}
|
||||
|
||||
getAppFullName := func(appObject *v1beta1.Application) (name string) {
|
||||
name = appObject.Labels[constants.ApplicationName]
|
||||
if appObject.Labels[constants.ApplicationVersion] != "" {
|
||||
name += fmt.Sprintf(":%v", appObject.Labels[constants.ApplicationVersion])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
appFilter := func(appObject *v1beta1.Application) bool {
|
||||
|
||||
for _, app := range apps {
|
||||
var applicationName, applicationVersion string
|
||||
tmp := strings.Split(app, ":")
|
||||
|
||||
if len(tmp) >= 1 {
|
||||
applicationName = tmp[0]
|
||||
}
|
||||
if len(tmp) == 2 {
|
||||
applicationVersion = tmp[1]
|
||||
}
|
||||
|
||||
if applicationName != "" && appObject.Labels[constants.ApplicationName] != applicationName {
|
||||
return false
|
||||
}
|
||||
if applicationVersion != "" && appObject.Labels[constants.ApplicationVersion] != applicationVersion {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
for _, appObj := range applicationList {
|
||||
if appFilter(appObj) {
|
||||
for _, com := range appObj.Status.ComponentList.Objects {
|
||||
kind := strings.Title(com.Kind)
|
||||
name := com.Name
|
||||
componentsMap[getAppFullName((appObj))] = append(componentsMap[getAppFullName(appObj)], kind+":"+name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return componentsMap
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) getApplicationPVCs(appObject *v1beta1.Application) []string {
|
||||
|
||||
var pvcList []string
|
||||
|
||||
ns := appObject.Namespace
|
||||
for _, com := range appObject.Status.ComponentList.Objects {
|
||||
|
||||
switch strings.Title(com.Kind) {
|
||||
case "Deployment":
|
||||
deployObj, err := mo.k8s.AppsV1().Deployments(ns).Get(context.Background(), com.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, vol := range deployObj.Spec.Template.Spec.Volumes {
|
||||
pvcList = append(pvcList, vol.PersistentVolumeClaim.ClaimName)
|
||||
}
|
||||
case "Statefulset":
|
||||
stsObj, err := mo.k8s.AppsV1().StatefulSets(ns).Get(context.Background(), com.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return nil
|
||||
}
|
||||
for _, vol := range stsObj.Spec.Template.Spec.Volumes {
|
||||
pvcList = append(pvcList, vol.PersistentVolumeClaim.ClaimName)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return pvcList
|
||||
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetSerivePodsMap(ns string, services []string) map[string][]string {
|
||||
var svcPodsMap = make(map[string][]string)
|
||||
|
||||
for _, svc := range services {
|
||||
svcObj, err := mo.k8s.CoreV1().Services(ns).Get(context.Background(), svc, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
svcSelector := svcObj.Spec.Selector
|
||||
if len(svcSelector) == 0 {
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
svcLabels := labels.Set{}
|
||||
for key, value := range svcSelector {
|
||||
svcLabels[key] = value
|
||||
}
|
||||
|
||||
selector := labels.SelectorFromSet(svcLabels)
|
||||
opt := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
|
||||
podList, err := mo.k8s.CoreV1().Pods(ns).List(context.Background(), opt)
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
svcPodsMap[svc] = append(svcPodsMap[svc], pod.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
@@ -26,6 +26,8 @@ const (
|
||||
WorkspaceDevopsCount = "workspace_devops_project_count"
|
||||
WorkspaceMemberCount = "workspace_member_count"
|
||||
WorkspaceRoleCount = "workspace_role_count"
|
||||
|
||||
MetricMeterPrefix = "meter_"
|
||||
)
|
||||
|
||||
var ClusterMetrics = []string{
|
||||
@@ -78,6 +80,13 @@ var ClusterMetrics = []string{
|
||||
"cluster_load15",
|
||||
"cluster_pod_abnormal_ratio",
|
||||
"cluster_node_offline_ratio",
|
||||
|
||||
// meter
|
||||
"meter_cluster_cpu_usage",
|
||||
"meter_cluster_memory_usage",
|
||||
"meter_cluster_net_bytes_transmitted",
|
||||
"meter_cluster_net_bytes_received",
|
||||
"meter_cluster_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var NodeMetrics = []string{
|
||||
@@ -113,6 +122,13 @@ var NodeMetrics = []string{
|
||||
"node_load15",
|
||||
"node_pod_abnormal_ratio",
|
||||
"node_pleg_quantile",
|
||||
|
||||
// meter
|
||||
"meter_node_cpu_usage",
|
||||
"meter_node_memory_usage_wo_cache",
|
||||
"meter_node_net_bytes_transmitted",
|
||||
"meter_node_net_bytes_received",
|
||||
"meter_node_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var WorkspaceMetrics = []string{
|
||||
@@ -138,6 +154,13 @@ var WorkspaceMetrics = []string{
|
||||
"workspace_service_count",
|
||||
"workspace_secret_count",
|
||||
"workspace_pod_abnormal_ratio",
|
||||
|
||||
// meter
|
||||
"meter_workspace_cpu_usage",
|
||||
"meter_workspace_memory_usage",
|
||||
"meter_workspace_net_bytes_transmitted",
|
||||
"meter_workspace_net_bytes_received",
|
||||
"meter_workspace_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var NamespaceMetrics = []string{
|
||||
@@ -168,6 +191,23 @@ var NamespaceMetrics = []string{
|
||||
"namespace_configmap_count",
|
||||
"namespace_ingresses_extensions_count",
|
||||
"namespace_s2ibuilder_count",
|
||||
|
||||
// meter
|
||||
"meter_namespace_cpu_usage",
|
||||
"meter_namespace_memory_usage_wo_cache",
|
||||
"meter_namespace_net_bytes_transmitted",
|
||||
"meter_namespace_net_bytes_received",
|
||||
"meter_namespace_pvc_bytes_total",
|
||||
}
|
||||
|
||||
// ApplicationMetrics lists the metering metric names available at the
// application level.
var ApplicationMetrics = []string{

	// meter
	"meter_application_cpu_usage",
	"meter_application_memory_usage_wo_cache",
	"meter_application_net_bytes_transmitted",
	"meter_application_net_bytes_received",
	"meter_application_pvc_bytes_total",
}
|
||||
|
||||
var WorkloadMetrics = []string{
|
||||
@@ -185,6 +225,21 @@ var WorkloadMetrics = []string{
|
||||
"workload_deployment_unavailable_replicas_ratio",
|
||||
"workload_daemonset_unavailable_replicas_ratio",
|
||||
"workload_statefulset_unavailable_replicas_ratio",
|
||||
|
||||
// meter
|
||||
"meter_workload_cpu_usage",
|
||||
"meter_workload_memory_usage_wo_cache",
|
||||
"meter_workload_net_bytes_transmitted",
|
||||
"meter_workload_net_bytes_received",
|
||||
"meter_workload_pvc_bytes_total",
|
||||
}
|
||||
|
||||
// ServiceMetrics lists the metering metric names available at the service
// level. Note there is no pvc meter entry here, unlike the other levels.
var ServiceMetrics = []string{
	// meter
	"meter_service_cpu_usage",
	"meter_service_memory_usage_wo_cache",
	"meter_service_net_bytes_transmitted",
	"meter_service_net_bytes_received",
}
|
||||
|
||||
var PodMetrics = []string{
|
||||
@@ -193,6 +248,13 @@ var PodMetrics = []string{
|
||||
"pod_memory_usage_wo_cache",
|
||||
"pod_net_bytes_transmitted",
|
||||
"pod_net_bytes_received",
|
||||
|
||||
// meter
|
||||
"meter_pod_cpu_usage",
|
||||
"meter_pod_memory_usage_wo_cache",
|
||||
"meter_pod_net_bytes_transmitted",
|
||||
"meter_pod_net_bytes_received",
|
||||
"meter_pod_pvc_bytes_total",
|
||||
}
|
||||
|
||||
var ContainerMetrics = []string{
|
||||
|
||||
@@ -23,13 +23,15 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
IdentifierNode = "node"
|
||||
IdentifierWorkspace = "workspace"
|
||||
IdentifierNamespace = "namespace"
|
||||
IdentifierWorkload = "workload"
|
||||
IdentifierPod = "pod"
|
||||
IdentifierContainer = "container"
|
||||
IdentifierPVC = "persistentvolumeclaim"
|
||||
IdentifierNode = "node"
|
||||
IdentifierWorkspace = "workspace"
|
||||
IdentifierNamespace = "namespace"
|
||||
IdentifierWorkload = "workload"
|
||||
IdentifierPod = "pod"
|
||||
IdentifierContainer = "container"
|
||||
IdentifierPVC = "persistentvolumeclaim"
|
||||
IdentifierService = "service"
|
||||
IdentifierApplication = "application"
|
||||
|
||||
OrderAscending = "asc"
|
||||
OrderDescending = "desc"
|
||||
|
||||
@@ -16,7 +16,9 @@ limitations under the License.
|
||||
|
||||
package monitoring
|
||||
|
||||
import "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
type Metrics struct {
|
||||
Results []monitoring.Metric `json:"results" description:"actual array of results"`
|
||||
|
||||
222
pkg/models/monitoring/utils.go
Normal file
222
pkg/models/monitoring/utils.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/klog"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
const (
	// Meter resource categories: each meter name in MeterResourceMap is
	// assigned one of these so pricing can be looked up per resource kind.
	METER_RESOURCE_TYPE_CPU = iota
	METER_RESOURCE_TYPE_MEM
	METER_RESOURCE_TYPE_NET_INGRESS
	METER_RESOURCE_TYPE_NET_EGRESS
	METER_RESOURCE_TYPE_PVC

	// meteringConfig is the fixed path of the metering price configuration
	// file read by LoadYaml.
	meteringConfig = "/etc/kubesphere/metering/ks-metering.yaml"
)
|
||||
|
||||
// MeterResourceMap maps every known meter metric name to its resource
// category (METER_RESOURCE_TYPE_*), across all levels: cluster, node,
// workspace, namespace, application, workload, service and pod.
// Note: "transmitted" is categorized as egress and "received" as ingress.
var MeterResourceMap = map[string]int{
	"meter_cluster_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_cluster_memory_usage":               METER_RESOURCE_TYPE_MEM,
	"meter_cluster_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_cluster_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_cluster_pvc_bytes_total":            METER_RESOURCE_TYPE_PVC,
	"meter_node_cpu_usage":                     METER_RESOURCE_TYPE_CPU,
	"meter_node_memory_usage_wo_cache":         METER_RESOURCE_TYPE_MEM,
	"meter_node_net_bytes_transmitted":         METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_node_net_bytes_received":            METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_node_pvc_bytes_total":               METER_RESOURCE_TYPE_PVC,
	"meter_workspace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_workspace_memory_usage":             METER_RESOURCE_TYPE_MEM,
	"meter_workspace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workspace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workspace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_namespace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_namespace_memory_usage_wo_cache":    METER_RESOURCE_TYPE_MEM,
	"meter_namespace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_namespace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_namespace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_application_cpu_usage":              METER_RESOURCE_TYPE_CPU,
	"meter_application_memory_usage_wo_cache":  METER_RESOURCE_TYPE_MEM,
	"meter_application_net_bytes_transmitted":  METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_application_net_bytes_received":     METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_application_pvc_bytes_total":        METER_RESOURCE_TYPE_PVC,
	"meter_workload_cpu_usage":                 METER_RESOURCE_TYPE_CPU,
	"meter_workload_memory_usage_wo_cache":     METER_RESOURCE_TYPE_MEM,
	"meter_workload_net_bytes_transmitted":     METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workload_net_bytes_received":        METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workload_pvc_bytes_total":           METER_RESOURCE_TYPE_PVC,
	"meter_service_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_service_memory_usage_wo_cache":      METER_RESOURCE_TYPE_MEM,
	"meter_service_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_service_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_cpu_usage":                      METER_RESOURCE_TYPE_CPU,
	"meter_pod_memory_usage_wo_cache":          METER_RESOURCE_TYPE_MEM,
	"meter_pod_net_bytes_transmitted":          METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_pod_net_bytes_received":             METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_pvc_bytes_total":                METER_RESOURCE_TYPE_PVC,
}
|
||||
|
||||
// PriceInfo holds the hourly unit prices loaded from the metering
// configuration file.
// NOTE(review): "Giagabytes" in the ingress field name and its tags is a
// typo for "Gigabytes"; fixing it would change the expected config/JSON
// keys, so it is left as-is. The egress field is spelled correctly,
// making the two inconsistent.
type PriceInfo struct {
	CpuPerCorePerHour                         float64 `json:"cpuPerCorePerHour" yaml:"cpuPerCorePerHour"`
	MemPerGigabytesPerHour                    float64 `json:"memPerGigabytesPerHour" yaml:"memPerGigabytesPerHour"`
	IngressNetworkTrafficPerGiagabytesPerHour float64 `json:"ingressNetworkTrafficPerGiagabytesPerHour" yaml:"ingressNetworkTrafficPerGiagabytesPerHour"`
	EgressNetworkTrafficPerGigabytesPerHour   float64 `json:"egressNetworkTrafficPerGigabytesPerHour" yaml:"egressNetworkTrafficPerGigabytesPerHour"`
	PvcPerGigabytesPerHour                    float64 `json:"pvcPerGigabytesPerHour" yaml:"pvcPerGigabytesPerHour"`
}

// Billing wraps the price-information section of the metering config.
type Billing struct {
	PriceInfo PriceInfo `json:"priceInfo" yaml:"priceInfo"`
}

// MeterConfig is the root document of the metering configuration file.
type MeterConfig struct {
	Billing Billing `json:"billing" yaml:"billing"`
}

// GetPriceInfo returns the price section of the configuration.
func (mc MeterConfig) GetPriceInfo() PriceInfo {
	return mc.Billing.PriceInfo
}
|
||||
|
||||
func LoadYaml() (*MeterConfig, error) {
|
||||
|
||||
var meterConfig MeterConfig
|
||||
|
||||
mf, err := os.Open(meteringConfig)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = yaml.NewYAMLOrJSONDecoder(mf, 1024).Decode(&meterConfig); err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &meterConfig, nil
|
||||
}
|
||||
|
||||
func getMaxPointValue(points []monitoring.Point) float64 {
|
||||
var max float64
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
max = p.Value()
|
||||
}
|
||||
|
||||
if p.Value() > max {
|
||||
max = p.Value()
|
||||
}
|
||||
}
|
||||
|
||||
return max
|
||||
}
|
||||
|
||||
func getMinPointValue(points []monitoring.Point) float64 {
|
||||
var min float64
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
min = p.Value()
|
||||
}
|
||||
|
||||
if p.Value() < min {
|
||||
min = p.Value()
|
||||
}
|
||||
}
|
||||
|
||||
return min
|
||||
}
|
||||
|
||||
func getSumPointValue(points []monitoring.Point) float64 {
|
||||
avg := 0.0
|
||||
|
||||
for _, p := range points {
|
||||
avg += p.Value()
|
||||
}
|
||||
|
||||
return avg
|
||||
}
|
||||
|
||||
func getAvgPointValue(points []monitoring.Point) float64 {
|
||||
return getSumPointValue(points) / float64(len(points))
|
||||
}
|
||||
|
||||
func getFeeWithMeterName(meterName string, sum float64) float64 {
|
||||
|
||||
meterConfig, err := LoadYaml()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return -1
|
||||
}
|
||||
priceInfo := meterConfig.GetPriceInfo()
|
||||
|
||||
if resourceType, ok := MeterResourceMap[meterName]; !ok {
|
||||
klog.Errorf("invlaid meter %v", meterName)
|
||||
return -1
|
||||
} else {
|
||||
switch resourceType {
|
||||
case METER_RESOURCE_TYPE_CPU:
|
||||
// unit: core, precision: 0.001
|
||||
sum = math.Round(sum*1000) / 1000
|
||||
return priceInfo.CpuPerCorePerHour * sum
|
||||
case METER_RESOURCE_TYPE_MEM:
|
||||
// unit: Gigabyte, precision: 0.1
|
||||
sum = math.Round(sum/1073741824*10) / 10
|
||||
return priceInfo.MemPerGigabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_NET_INGRESS:
|
||||
// unit: Megabyte, precision: 1
|
||||
sum = math.Round(sum / 1048576)
|
||||
return priceInfo.IngressNetworkTrafficPerGiagabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_NET_EGRESS:
|
||||
// unit: Megabyte, precision:
|
||||
sum = math.Round(sum / 1048576)
|
||||
return priceInfo.EgressNetworkTrafficPerGigabytesPerHour * sum
|
||||
case METER_RESOURCE_TYPE_PVC:
|
||||
// unit: Gigabyte, precision: 0.1
|
||||
sum = math.Round(sum/1073741824*10) / 10
|
||||
return priceInfo.PvcPerGigabytesPerHour * sum
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// updateMetricStatData computes per-series summary statistics (min, max, avg,
// sum) and the corresponding fee for every series of the metric, writing them
// back into the metric values, and returns the updated metric data.
//
// scalingMap, when non-nil, maps a metric name to a scaling factor that is
// applied to the summed value (and therefore the fee) before pricing.
func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float64) monitoring.MetricData {
	metricName := metric.MetricName
	metricData := metric.MetricData
	for index, metricValue := range metricData.MetricValues {

		// Normalize both matrix (range) and non-matrix (instant) results
		// into a point slice so one aggregation path handles either shape.
		var points []monitoring.Point
		if metricData.MetricType == monitoring.MetricTypeMatrix {
			points = metricValue.Series
		} else {
			points = append(points, *metricValue.Sample)
		}

		var factor float64 = 1
		if scalingMap != nil {
			// NOTE(review): if metricName is missing from scalingMap this
			// yields the zero value (0), silently zeroing sum and fee —
			// confirm every metric name has an entry.
			factor = scalingMap[metricName]
		}

		if len(points) == 1 {
			// Single sample: min/max/avg are the raw sample value; only the
			// sum (and the fee derived from it) is scaled by factor.
			sample := points[0]
			sum := sample[1] * factor
			metricData.MetricValues[index].MinValue = sample[1]
			metricData.MetricValues[index].MaxValue = sample[1]
			metricData.MetricValues[index].AvgValue = sample[1]
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
		} else {
			// Multiple samples: aggregate over the series; as above, only the
			// sum is scaled by factor.
			sum := getSumPointValue(points) * factor
			metricData.MetricValues[index].MinValue = getMinPointValue(points)
			metricData.MetricValues[index].MaxValue = getMaxPointValue(points)
			metricData.MetricValues[index].AvgValue = getAvgPointValue(points)
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum)
		}

	}
	return metricData
}
|
||||
1117
pkg/models/tenant/metering.go
Normal file
1117
pkg/models/tenant/metering.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -37,6 +37,7 @@ import (
|
||||
auditingv1alpha1 "kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1"
|
||||
eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1"
|
||||
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
|
||||
meteringv1alpha1 "kubesphere.io/kubesphere/pkg/api/metering/v1alpha1"
|
||||
clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1"
|
||||
tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1"
|
||||
tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2"
|
||||
@@ -50,11 +51,16 @@ import (
|
||||
"kubesphere.io/kubesphere/pkg/models/events"
|
||||
"kubesphere.io/kubesphere/pkg/models/iam/am"
|
||||
"kubesphere.io/kubesphere/pkg/models/logging"
|
||||
"kubesphere.io/kubesphere/pkg/models/metering"
|
||||
"kubesphere.io/kubesphere/pkg/models/monitoring"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
resourcesv1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
resourcev1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
auditingclient "kubesphere.io/kubesphere/pkg/simple/client/auditing"
|
||||
eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events"
|
||||
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
|
||||
monitoringclient "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
opclient "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
@@ -79,6 +85,8 @@ type Interface interface {
|
||||
PatchNamespace(workspace string, namespace *corev1.Namespace) (*corev1.Namespace, error)
|
||||
PatchWorkspace(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
ListClusters(info user.Info) (*api.ListResult, error)
|
||||
Metering(user user.Info, queryParam *meteringv1alpha1.Query) (monitoring.Metrics, error)
|
||||
MeteringHierarchy(user user.Info, queryParam *meteringv1alpha1.Query) (metering.ResourceStatistic, error)
|
||||
}
|
||||
|
||||
type tenantOperator struct {
|
||||
@@ -90,9 +98,10 @@ type tenantOperator struct {
|
||||
events events.Interface
|
||||
lo logging.LoggingOperator
|
||||
auditing auditing.Interface
|
||||
mo monitoring.MonitoringOperator
|
||||
}
|
||||
|
||||
func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ksclient kubesphere.Interface, evtsClient eventsclient.Client, loggingClient loggingclient.Client, auditingclient auditingclient.Client, am am.AccessManagementInterface, authorizer authorizer.Authorizer) Interface {
|
||||
func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ksclient kubesphere.Interface, evtsClient eventsclient.Client, loggingClient loggingclient.Client, auditingclient auditingclient.Client, am am.AccessManagementInterface, authorizer authorizer.Authorizer, monitoringclient monitoringclient.Interface, opClient opclient.Client, resourceGetter *resourcev1alpha3.ResourceGetter) Interface {
|
||||
return &tenantOperator{
|
||||
am: am,
|
||||
authorizer: authorizer,
|
||||
@@ -102,6 +111,7 @@ func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ks
|
||||
events: events.NewEventsOperator(evtsClient),
|
||||
lo: logging.NewLoggingOperator(loggingClient),
|
||||
auditing: auditing.NewEventsOperator(auditingclient),
|
||||
mo: monitoring.NewMonitoringOperator(monitoringclient, nil, k8sclient, informers, opClient, resourceGetter),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -951,6 +961,38 @@ func (t *tenantOperator) Auditing(user user.Info, queryParam *auditingv1alpha1.Q
|
||||
})
|
||||
}
|
||||
|
||||
// Metering builds authorization-aware query options for the given user and
// metering query, then executes the named meters query and returns the
// resulting metrics.
func (t *tenantOperator) Metering(user user.Info, query *meteringv1alpha1.Query) (metrics monitoring.Metrics, err error) {

	var opt QueryOptions

	// Scope the query to what this user is allowed to see at the requested level.
	opt, err = t.makeQueryOptions(user, *query, query.Level)
	if err != nil {
		return
	}
	metrics, err = t.ProcessNamedMetersQuery(opt)

	return
}
|
||||
|
||||
// MeteringHierarchy runs a metering query for the user, transforms the raw
// metric results into per-pod statistics, and classifies those statistics
// into a hierarchical resource view for the requested namespace.
//
// On any failure it returns an empty metering.ResourceStatistic and the error.
func (t *tenantOperator) MeteringHierarchy(user user.Info, queryParam *meteringv1alpha1.Query) (metering.ResourceStatistic, error) {
	res, err := t.Metering(user, queryParam)
	if err != nil {
		return metering.ResourceStatistic{}, err
	}

	// get pods stat info under ns
	podsStats := t.transformMetricData(res)

	// classify pods stats
	resourceStats, err := t.classifyPodStats(user, queryParam.NamespaceName, podsStats)
	if err != nil {
		klog.Error(err)
		return metering.ResourceStatistic{}, err
	}

	return resourceStats, nil
}
|
||||
|
||||
func contains(objects []runtime.Object, object runtime.Object) bool {
|
||||
for _, item := range objects {
|
||||
if item == object {
|
||||
|
||||
@@ -541,5 +541,5 @@ func prepare() Interface {
|
||||
amOperator := am.NewOperator(ksClient, k8sClient, fakeInformerFactory)
|
||||
authorizer := rbac.NewRBACAuthorizer(amOperator)
|
||||
|
||||
return New(fakeInformerFactory, k8sClient, ksClient, nil, nil, nil, amOperator, authorizer)
|
||||
return New(fakeInformerFactory, k8sClient, ksClient, nil, nil, nil, amOperator, authorizer, nil, nil, nil)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user