Add metrics-server client to monitoring

Signed-off-by: root <danma@yunify.com>
This commit is contained in:
root
2020-11-30 07:27:17 +00:00
parent 38eaa5cde0
commit 8c86c9e1a5
8 changed files with 420 additions and 25 deletions

View File

@@ -37,6 +37,7 @@ import (
eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events/elasticsearch" eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/k8s"
esclient "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch" esclient "kubesphere.io/kubesphere/pkg/simple/client/logging/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/metricsserver"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus" "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix" "kubesphere.io/kubesphere/pkg/simple/client/openpitrix"
"kubesphere.io/kubesphere/pkg/simple/client/s3" "kubesphere.io/kubesphere/pkg/simple/client/s3"
@@ -117,13 +118,19 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
if s.MonitoringOptions == nil || len(s.MonitoringOptions.Endpoint) == 0 { if s.MonitoringOptions == nil || len(s.MonitoringOptions.Endpoint) == 0 {
return nil, fmt.Errorf("monitoring service address in configuration MUST not be empty, please check configmap/kubesphere-config in kubesphere-system namespace") return nil, fmt.Errorf("monitoring service address in configuration MUST not be empty, please check configmap/kubesphere-config in kubesphere-system namespace")
} else { } else {
monitoringClient, err := prometheus.NewPrometheus(s.MonitoringOptions) prometheusClient, err := prometheus.NewPrometheus(s.MonitoringOptions)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to connect to prometheus, please check prometheus status, error: %v", err) return nil, fmt.Errorf("failed to connect to prometheus, please check prometheus status, error: %v", err)
} }
apiServer.MonitoringClient = monitoringClient apiServer.PrometheusClient = prometheusClient
} }
metricsClient, err := metricsserver.NewMetricsServer(kubernetesClient.Kubernetes(), s.KubernetesOptions)
if err != nil {
return nil, fmt.Errorf("failed to connect to metrics-server, please check metrics-server status, error: %v", err)
}
apiServer.MetricsClient = metricsClient
if s.LoggingOptions.Host != "" { if s.LoggingOptions.Host != "" {
loggingClient, err := esclient.NewClient(s.LoggingOptions) loggingClient, err := esclient.NewClient(s.LoggingOptions)
if err != nil { if err != nil {

View File

@@ -130,7 +130,9 @@ type APIServer struct {
CacheClient cache.Interface CacheClient cache.Interface
// monitoring client set // monitoring client set
MonitoringClient monitoring.Interface PrometheusClient monitoring.Interface
MetricsClient monitoring.Interface
// //
OpenpitrixClient openpitrix.Client OpenpitrixClient openpitrix.Client
@@ -212,7 +214,7 @@ func (s *APIServer) installKubeSphereAPIs() {
urlruntime.Must(configv1alpha2.AddToContainer(s.container, s.Config)) urlruntime.Must(configv1alpha2.AddToContainer(s.container, s.Config))
urlruntime.Must(resourcev1alpha3.AddToContainer(s.container, s.InformerFactory, s.RuntimeCache)) urlruntime.Must(resourcev1alpha3.AddToContainer(s.container, s.InformerFactory, s.RuntimeCache))
urlruntime.Must(monitoringv1alpha3.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.MonitoringClient, s.InformerFactory, s.OpenpitrixClient)) urlruntime.Must(monitoringv1alpha3.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.PrometheusClient, s.MetricsClient, s.InformerFactory, s.OpenpitrixClient))
urlruntime.Must(openpitrixv1.AddToContainer(s.container, s.InformerFactory, s.OpenpitrixClient)) urlruntime.Must(openpitrixv1.AddToContainer(s.container, s.InformerFactory, s.OpenpitrixClient))
urlruntime.Must(operationsv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes())) urlruntime.Must(operationsv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes()))
urlruntime.Must(resourcesv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.InformerFactory, urlruntime.Must(resourcesv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.InformerFactory,

View File

@@ -35,8 +35,8 @@ type handler struct {
mo model.MonitoringOperator mo model.MonitoringOperator
} }
func newHandler(k kubernetes.Interface, m monitoring.Interface, f informers.InformerFactory, o openpitrix.Client) *handler { func newHandler(k kubernetes.Interface, prometheusClient monitoring.Interface, metricsClient monitoring.Interface, f informers.InformerFactory, o openpitrix.Client) *handler {
return &handler{k, model.NewMonitoringOperator(m, k, f, o)} return &handler{k, model.NewMonitoringOperator(prometheusClient, metricsClient, k, f, o)}
} }
func (h handler) handleKubeSphereMetricsQuery(req *restful.Request, resp *restful.Response) { func (h handler) handleKubeSphereMetricsQuery(req *restful.Request, resp *restful.Response) {

View File

@@ -217,7 +217,7 @@ func TestParseRequestParams(t *testing.T) {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
client := fake.NewSimpleClientset(&tt.namespace) client := fake.NewSimpleClientset(&tt.namespace)
fakeInformerFactory := informers.NewInformerFactories(client, nil, nil, nil, nil, nil) fakeInformerFactory := informers.NewInformerFactories(client, nil, nil, nil, nil, nil)
handler := newHandler(client, nil, fakeInformerFactory, nil) handler := newHandler(client, nil, nil, fakeInformerFactory, nil)
result, err := handler.makeQueryOptions(tt.params, tt.lvl) result, err := handler.makeQueryOptions(tt.params, tt.lvl)
if err != nil { if err != nil {

View File

@@ -39,10 +39,10 @@ const (
var GroupVersion = schema.GroupVersion{Group: groupName, Version: "v1alpha3"} var GroupVersion = schema.GroupVersion{Group: groupName, Version: "v1alpha3"}
func AddToContainer(c *restful.Container, k8sClient kubernetes.Interface, monitoringClient monitoring.Interface, factory informers.InformerFactory, opClient openpitrix.Client) error { func AddToContainer(c *restful.Container, k8sClient kubernetes.Interface, prometheusClient monitoring.Interface, metricsClient monitoring.Interface, factory informers.InformerFactory, opClient openpitrix.Client) error {
ws := runtime.NewWebService(GroupVersion) ws := runtime.NewWebService(GroupVersion)
h := newHandler(k8sClient, monitoringClient, factory, opClient) h := newHandler(k8sClient, prometheusClient, metricsClient, factory, opClient)
ws.Route(ws.GET("/kubesphere"). ws.Route(ws.GET("/kubesphere").
To(h.handleKubeSphereMetricsQuery). To(h.handleKubeSphereMetricsQuery).

View File

@@ -47,18 +47,20 @@ type MonitoringOperator interface {
} }
type monitoringOperator struct { type monitoringOperator struct {
c monitoring.Interface prometheus monitoring.Interface
k8s kubernetes.Interface metricsserver monitoring.Interface
ks ksinformers.SharedInformerFactory k8s kubernetes.Interface
op openpitrix.Interface ks ksinformers.SharedInformerFactory
op openpitrix.Interface
} }
func NewMonitoringOperator(client monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client) MonitoringOperator { func NewMonitoringOperator(prometheusClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, opClient opclient.Client) MonitoringOperator {
return &monitoringOperator{ return &monitoringOperator{
c: client, prometheus: prometheusClient,
k8s: k8s, metricsserver: metricsClient,
ks: factory.KubeSphereSharedInformerFactory(), k8s: k8s,
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient), ks: factory.KubeSphereSharedInformerFactory(),
op: openpitrix.NewOpenpitrixOperator(factory.KubernetesSharedInformerFactory(), opClient),
} }
} }
@@ -74,7 +76,7 @@ func (mo monitoringOperator) GetMetric(expr, namespace string, time time.Time) (
return monitoring.Metric{}, err return monitoring.Metric{}, err
} }
} }
return mo.c.GetMetric(expr, time), nil return mo.prometheus.GetMetric(expr, time), nil
} }
func (mo monitoringOperator) GetMetricOverTime(expr, namespace string, start, end time.Time, step time.Duration) (monitoring.Metric, error) { func (mo monitoringOperator) GetMetricOverTime(expr, namespace string, start, end time.Time, step time.Duration) (monitoring.Metric, error) {
@@ -89,21 +91,49 @@ func (mo monitoringOperator) GetMetricOverTime(expr, namespace string, start, en
return monitoring.Metric{}, err return monitoring.Metric{}, err
} }
} }
return mo.c.GetMetricOverTime(expr, start, end, step), nil return mo.prometheus.GetMetricOverTime(expr, start, end, step), nil
} }
func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics { func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetrics(metrics, time, opt) ress := mo.prometheus.GetNamedMetrics(metrics, time, opt)
mr := mo.metricsserver.GetNamedMetrics(metrics, time, opt)
//Merge edge node metrics data
edgeMetrics := make(map[string]monitoring.MetricData)
for _, metric := range mr {
edgeMetrics[metric.MetricName] = metric.MetricData
}
for i, metric := range ress {
if val, ok := edgeMetrics[metric.MetricName]; ok {
ress[i].MetricData.MetricValues = append(ress[i].MetricData.MetricValues, val.MetricValues...)
}
}
return Metrics{Results: ress} return Metrics{Results: ress}
} }
func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics { func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics {
ress := mo.c.GetNamedMetricsOverTime(metrics, start, end, step, opt) ress := mo.prometheus.GetNamedMetricsOverTime(metrics, start, end, step, opt)
mr := mo.metricsserver.GetNamedMetricsOverTime(metrics, start, end, step, opt)
//Merge edge node metrics data
edgeMetrics := make(map[string]monitoring.MetricData)
for _, metric := range mr {
edgeMetrics[metric.MetricName] = metric.MetricData
}
for i, metric := range ress {
if val, ok := edgeMetrics[metric.MetricName]; ok {
ress[i].MetricData.MetricValues = append(ress[i].MetricData.MetricValues, val.MetricValues...)
}
}
return Metrics{Results: ress} return Metrics{Results: ress}
} }
func (mo monitoringOperator) GetMetadata(namespace string) Metadata { func (mo monitoringOperator) GetMetadata(namespace string) Metadata {
data := mo.c.GetMetadata(namespace) data := mo.prometheus.GetMetadata(namespace)
return Metadata{Data: data} return Metadata{Data: data}
} }
@@ -121,7 +151,7 @@ func (mo monitoringOperator) GetMetricLabelSet(metric, namespace string, start,
return MetricLabelSet{} return MetricLabelSet{}
} }
} }
data := mo.c.GetMetricLabelSet(expr, start, end) data := mo.prometheus.GetMetricLabelSet(expr, start, end)
return MetricLabelSet{Data: data} return MetricLabelSet{Data: data}
} }

View File

@@ -0,0 +1,356 @@
/*
Copyright 2020 KubeSphere Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metricsserver
import (
"context"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
promlabels "github.com/prometheus/prometheus/pkg/labels"
metricsapi "k8s.io/metrics/pkg/apis/metrics"
metricsV1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
)
// metricsServer implements the monitoring interface backed by the
// Kubernetes metrics-server aggregated API. It only produces node-level
// metrics for nodes labelled as edge nodes; all other queries return
// empty results.
type metricsServer struct {
	// metricsAPIAvailable records whether a supported metrics.k8s.io
	// API version was discovered on the target cluster.
	metricsAPIAvailable bool
	// metricsClient talks to the metrics.k8s.io aggregated API.
	metricsClient *metricsclient.Clientset
	// k8s is used to list and inspect nodes.
	k8s kubernetes.Interface
}

var (
	// supportedMetricsAPIs lists the metrics.k8s.io versions this client understands.
	supportedMetricsAPIs = map[string]bool{
		"v1beta1": true,
	}
)

// edgeNodeLabel selects edge nodes; only nodes carrying this label are queried.
const edgeNodeLabel = "node-role.kubernetes.io/edge"
// metricsAPISupported reports whether the discovered API groups contain the
// metrics.k8s.io group at a version this client supports.
func metricsAPISupported(discoveredAPIGroups *metav1.APIGroupList) bool {
	for _, group := range discoveredAPIGroups.Groups {
		if group.Name != metricsapi.GroupName {
			continue
		}
		for _, v := range group.Versions {
			// supportedMetricsAPIs only ever stores true values, so a
			// plain lookup is equivalent to the comma-ok form.
			if supportedMetricsAPIs[v.Version] {
				return true
			}
		}
	}
	return false
}
// listEdgeNodes returns all nodes carrying the edge-node label, keyed by
// node name. On a list error the (possibly empty) map is returned together
// with the error.
func (m metricsServer) listEdgeNodes() (map[string]v1.Node, error) {
	result := make(map[string]v1.Node)

	nodeList, err := m.k8s.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		LabelSelector: edgeNodeLabel,
	})
	if err != nil {
		return result, err
	}

	for _, node := range nodeList.Items {
		result[node.Name] = node
	}
	return result, nil
}
// filterEdgeNodeNames returns the set of edge-node names matching the regexp
// filter in opts.ResourceFilter. On an invalid filter expression the error is
// logged and an empty set is returned.
func (m metricsServer) filterEdgeNodeNames(edgeNodes map[string]v1.Node, opts *monitoring.QueryOptions) map[string]bool {
	filtered := make(map[string]bool)

	matcher, err := promlabels.NewMatcher(promlabels.MatchRegexp, "edgenodefilter", opts.ResourceFilter)
	if err != nil {
		klog.Errorf("Edge node filter regexp error %v\n", err)
		return filtered
	}

	// Map keys equal node names, so iterating keys is equivalent to
	// iterating values and reading .Name.
	for name := range edgeNodes {
		if matcher.Matches(name) {
			filtered[name] = true
		}
	}
	return filtered
}
// getNodeMetricsFromMetricsAPI fetches the current node metrics for edge
// nodes from the metrics.k8s.io v1beta1 API and converts them to the
// internal metrics API types.
func (m metricsServer) getNodeMetricsFromMetricsAPI() (*metricsapi.NodeMetricsList, error) {
	// List directly into the versioned result; the previous pre-allocated
	// struct and separate err declaration were redundant.
	versionedMetrics, err := m.metricsClient.MetricsV1beta1().NodeMetricses().
		List(context.TODO(), metav1.ListOptions{LabelSelector: edgeNodeLabel})
	if err != nil {
		return nil, err
	}

	metrics := &metricsapi.NodeMetricsList{}
	if err := metricsV1beta1.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, metrics, nil); err != nil {
		return nil, err
	}
	return metrics, nil
}
// NewMetricsServer constructs a monitoring.Interface backed by metrics-server.
// If the metrics.k8s.io API is not available on the cluster, a degraded
// client is returned with a nil error; its queries yield empty results.
func NewMetricsServer(k kubernetes.Interface, options *k8s.KubernetesOptions) (monitoring.Interface, error) {
	// Renamed from "metricsServer", which shadowed the type name.
	var ms metricsServer

	config, err := clientcmd.BuildConfigFromFlags("", options.KubeConfig)
	if err != nil {
		klog.Error(err)
		return ms, err
	}

	ms.k8s = k

	apiGroups, err := k.Discovery().ServerGroups()
	if err != nil {
		klog.Error(err)
		return ms, err
	}

	ms.metricsAPIAvailable = metricsAPISupported(apiGroups)
	if !ms.metricsAPIAvailable {
		// Degrade gracefully instead of failing hard; err is nil here, so
		// return nil explicitly (the original returned the stale err var).
		klog.Warningf("Metrics API not available.")
		return ms, nil
	}

	metricsClient, err := metricsclient.NewForConfig(config)
	if err != nil {
		klog.Error(err)
		return ms, err
	}
	ms.metricsClient = metricsClient

	return ms, nil
}
// GetMetric is not supported by the metrics-server backend; it always
// returns a zero-valued metric.
func (m metricsServer) GetMetric(expr string, ts time.Time) monitoring.Metric {
	return monitoring.Metric{}
}
// GetMetricOverTime is not supported by the metrics-server backend; it
// always returns a zero-valued metric.
func (m metricsServer) GetMetricOverTime(expr string, start, end time.Time, step time.Duration) monitoring.Metric {
	return monitoring.Metric{}
}
// edgeNodeMetrics enumerates the node-level metric names this client can
// derive for edge nodes from metrics-server usage and node capacity.
var edgeNodeMetrics = []string{"node_cpu_usage", "node_cpu_total", "node_cpu_utilisation", "node_memory_usage_wo_cache", "node_memory_total", "node_memory_utilisation"}
// GetNamedMetrics returns point-in-time edge-node metrics collected from
// metrics-server. Only node-level queries are served; for other levels, or
// when the metrics API is unavailable, an empty result is returned.
//
// NOTE(review): the metrics argument is currently ignored — every name in
// edgeNodeMetrics is always computed; confirm callers merge by metric name.
func (m metricsServer) GetNamedMetrics(metrics []string, ts time.Time, o monitoring.QueryOption) []monitoring.Metric {
	var res []monitoring.Metric
	opts := monitoring.NewQueryOptions()
	o.Apply(opts)

	if opts.Level != monitoring.LevelNode {
		return res
	}
	if !m.metricsAPIAvailable {
		klog.Warningf("Metrics API not available.")
		return res
	}

	edgeNodes, err := m.listEdgeNodes()
	if err != nil {
		klog.Errorf("List edge nodes error %v\n", err)
		return res
	}
	edgeNodeNamesFiltered := m.filterEdgeNodeNames(edgeNodes, opts)
	if len(edgeNodeNamesFiltered) == 0 {
		klog.V(4).Infof("No edge node metrics is requested")
		return res
	}

	metricsResult, err := m.getNodeMetricsFromMetricsAPI()
	if err != nil {
		klog.Errorf("Get edge node metrics error %v\n", err)
		return res
	}

	// Snapshot node status for each requested edge node so capacity and
	// addresses can be looked up while walking the metrics items.
	status := make(map[string]v1.NodeStatus)
	for n := range edgeNodeNamesFiltered {
		status[n] = edgeNodes[n].Status
	}

	nodeMetrics := make(map[string]*monitoring.MetricData)
	for _, enm := range edgeNodeMetrics {
		nodeMetrics[enm] = &monitoring.MetricData{MetricType: monitoring.MetricTypeVector}
	}

	var usage v1.ResourceList
	var capacity v1.ResourceList // renamed from "cap", which shadowed the builtin
	// "item" avoids shadowing the receiver m (previously "for _, m := range").
	for _, item := range metricsResult.Items {
		if _, ok := edgeNodeNamesFiltered[item.Name]; !ok {
			continue
		}
		item.Usage.DeepCopyInto(&usage)
		status[item.Name].Capacity.DeepCopyInto(&capacity)

		metricValues := make(map[string]*monitoring.MetricValue)
		for _, enm := range edgeNodeMetrics {
			metricValues[enm] = &monitoring.MetricValue{
				Metadata: make(map[string]string),
			}
			metricValues[enm].Metadata["node"] = item.Name
			metricValues[enm].Metadata["role"] = "edge"
		}
		// Attach the node's internal IP (first one found) to every metric.
		for _, addr := range status[item.Name].Addresses {
			if addr.Type == v1.NodeInternalIP {
				for _, enm := range edgeNodeMetrics {
					metricValues[enm].Metadata["host_ip"] = addr.Address
				}
				break
			}
		}

		t := float64(item.Timestamp.Unix())
		// NOTE(review): a zero CPU/memory capacity would make the
		// utilisation ratios +Inf; assumes metrics-server never reports
		// zero-capacity nodes — confirm.
		metricValues["node_cpu_usage"].Sample = &monitoring.Point{t, float64(usage.Cpu().MilliValue()) / 1000}
		metricValues["node_cpu_total"].Sample = &monitoring.Point{t, float64(capacity.Cpu().MilliValue()) / 1000}
		metricValues["node_cpu_utilisation"].Sample = &monitoring.Point{t, float64(usage.Cpu().MilliValue()) / float64(capacity.Cpu().MilliValue())}
		metricValues["node_memory_usage_wo_cache"].Sample = &monitoring.Point{t, float64(usage.Memory().Value())}
		metricValues["node_memory_total"].Sample = &monitoring.Point{t, float64(capacity.Memory().Value())}
		metricValues["node_memory_utilisation"].Sample = &monitoring.Point{t, float64(usage.Memory().Value()) / float64(capacity.Memory().Value())}

		for _, enm := range edgeNodeMetrics {
			nodeMetrics[enm].MetricValues = append(nodeMetrics[enm].MetricValues, *metricValues[enm])
		}
	}

	for _, enm := range edgeNodeMetrics {
		res = append(res, monitoring.Metric{MetricName: enm, MetricData: *nodeMetrics[enm]})
	}
	return res
}
// GetNamedMetricsOverTime returns edge-node metrics from metrics-server as
// time series. Only node-level queries are served; for other levels, or when
// the metrics API is unavailable, an empty result is returned.
//
// NOTE(review): metrics-server exposes only the latest reading, so each
// series contains a single point regardless of start/end/step — confirm
// callers tolerate this. The metrics argument is ignored (every name in
// edgeNodeMetrics is always computed).
func (m metricsServer) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, o monitoring.QueryOption) []monitoring.Metric {
	var res []monitoring.Metric
	opts := monitoring.NewQueryOptions()
	o.Apply(opts)

	if opts.Level != monitoring.LevelNode {
		return res
	}
	if !m.metricsAPIAvailable {
		klog.Warningf("Metrics API not available.")
		return res
	}

	edgeNodes, err := m.listEdgeNodes()
	if err != nil {
		klog.Errorf("List edge nodes error %v\n", err)
		return res
	}
	edgeNodeNamesFiltered := m.filterEdgeNodeNames(edgeNodes, opts)
	if len(edgeNodeNamesFiltered) == 0 {
		klog.V(4).Infof("No edge node metrics is requested")
		return res
	}

	metricsResult, err := m.getNodeMetricsFromMetricsAPI()
	if err != nil {
		klog.Errorf("Get edge node metrics error %v\n", err)
		return res
	}

	// Snapshot node status for each requested edge node so capacity and
	// addresses can be looked up while walking the metrics items.
	status := make(map[string]v1.NodeStatus)
	for n := range edgeNodeNamesFiltered {
		status[n] = edgeNodes[n].Status
	}

	nodeMetrics := make(map[string]*monitoring.MetricData)
	for _, enm := range edgeNodeMetrics {
		nodeMetrics[enm] = &monitoring.MetricData{MetricType: monitoring.MetricTypeMatrix}
	}

	var usage v1.ResourceList
	var capacity v1.ResourceList // renamed from "cap", which shadowed the builtin
	// "item" avoids shadowing the receiver m (previously "for _, m := range").
	for _, item := range metricsResult.Items {
		if _, ok := edgeNodeNamesFiltered[item.Name]; !ok {
			continue
		}
		item.Usage.DeepCopyInto(&usage)
		status[item.Name].Capacity.DeepCopyInto(&capacity)

		metricValues := make(map[string]*monitoring.MetricValue)
		for _, enm := range edgeNodeMetrics {
			metricValues[enm] = &monitoring.MetricValue{
				Metadata: make(map[string]string),
			}
			metricValues[enm].Metadata["node"] = item.Name
			metricValues[enm].Metadata["role"] = "edge"
		}
		// Attach the node's internal IP (first one found) to every metric.
		for _, addr := range status[item.Name].Addresses {
			if addr.Type == v1.NodeInternalIP {
				for _, enm := range edgeNodeMetrics {
					metricValues[enm].Metadata["host_ip"] = addr.Address
				}
				break
			}
		}

		t := float64(item.Timestamp.Unix())
		// NOTE(review): a zero CPU/memory capacity would make the
		// utilisation ratios +Inf; assumes metrics-server never reports
		// zero-capacity nodes — confirm.
		metricValues["node_cpu_usage"].Series = append(metricValues["node_cpu_usage"].Series, monitoring.Point{t, float64(usage.Cpu().MilliValue()) / 1000})
		metricValues["node_cpu_total"].Series = append(metricValues["node_cpu_total"].Series, monitoring.Point{t, float64(capacity.Cpu().MilliValue()) / 1000})
		metricValues["node_cpu_utilisation"].Series = append(metricValues["node_cpu_utilisation"].Series, monitoring.Point{t, float64(usage.Cpu().MilliValue()) / float64(capacity.Cpu().MilliValue())})
		metricValues["node_memory_usage_wo_cache"].Series = append(metricValues["node_memory_usage_wo_cache"].Series, monitoring.Point{t, float64(usage.Memory().Value())})
		metricValues["node_memory_total"].Series = append(metricValues["node_memory_total"].Series, monitoring.Point{t, float64(capacity.Memory().Value())})
		metricValues["node_memory_utilisation"].Series = append(metricValues["node_memory_utilisation"].Series, monitoring.Point{t, float64(usage.Memory().Value()) / float64(capacity.Memory().Value())})

		for _, enm := range edgeNodeMetrics {
			nodeMetrics[enm].MetricValues = append(nodeMetrics[enm].MetricValues, *metricValues[enm])
		}
	}

	for _, enm := range edgeNodeMetrics {
		res = append(res, monitoring.Metric{MetricName: enm, MetricData: *nodeMetrics[enm]})
	}
	return res
}
// GetMetadata is not supported by the metrics-server backend; it always
// returns a nil slice.
func (m metricsServer) GetMetadata(namespace string) []monitoring.Metadata {
	return nil
}
// GetMetricLabelSet is not supported by the metrics-server backend; it
// always returns a nil slice.
func (m metricsServer) GetMetricLabelSet(expr string, start, end time.Time) []map[string]string {
	return nil
}

View File

@@ -120,7 +120,7 @@ func generateSwaggerJson() []byte {
urlruntime.Must(devopsv1alpha2.AddToContainer(container, informerFactory.KubeSphereSharedInformerFactory(), &fakedevops.Devops{}, nil, clientsets.KubeSphere(), fakes3.NewFakeS3(), "", nil)) urlruntime.Must(devopsv1alpha2.AddToContainer(container, informerFactory.KubeSphereSharedInformerFactory(), &fakedevops.Devops{}, nil, clientsets.KubeSphere(), fakes3.NewFakeS3(), "", nil))
urlruntime.Must(devopsv1alpha3.AddToContainer(container, &fakedevops.Devops{}, clientsets.Kubernetes(), clientsets.KubeSphere(), informerFactory.KubeSphereSharedInformerFactory(), informerFactory.KubernetesSharedInformerFactory())) urlruntime.Must(devopsv1alpha3.AddToContainer(container, &fakedevops.Devops{}, clientsets.Kubernetes(), clientsets.KubeSphere(), informerFactory.KubeSphereSharedInformerFactory(), informerFactory.KubernetesSharedInformerFactory()))
urlruntime.Must(iamv1alpha2.AddToContainer(container, nil, nil, group.New(informerFactory, clientsets.KubeSphere(), clientsets.Kubernetes()), nil)) urlruntime.Must(iamv1alpha2.AddToContainer(container, nil, nil, group.New(informerFactory, clientsets.KubeSphere(), clientsets.Kubernetes()), nil))
urlruntime.Must(monitoringv1alpha3.AddToContainer(container, clientsets.Kubernetes(), nil, informerFactory, nil)) urlruntime.Must(monitoringv1alpha3.AddToContainer(container, clientsets.Kubernetes(), nil, nil, informerFactory, nil))
urlruntime.Must(openpitrixv1.AddToContainer(container, informerFactory, openpitrix.NewMockClient(nil))) urlruntime.Must(openpitrixv1.AddToContainer(container, informerFactory, openpitrix.NewMockClient(nil)))
urlruntime.Must(operationsv1alpha2.AddToContainer(container, clientsets.Kubernetes())) urlruntime.Must(operationsv1alpha2.AddToContainer(container, clientsets.Kubernetes()))
urlruntime.Must(resourcesv1alpha2.AddToContainer(container, clientsets.Kubernetes(), informerFactory, "")) urlruntime.Must(resourcesv1alpha2.AddToContainer(container, clientsets.Kubernetes(), informerFactory, ""))