Merge pull request #121 from zryfish/fix_heapster_client

Add error check to heapsterclient response
This commit is contained in:
zryfish
2018-06-26 16:05:28 +08:00
committed by GitHub
8 changed files with 80 additions and 58 deletions

View File

@@ -60,6 +60,10 @@ func handleNodes(request *restful.Request, response *restful.Response) {
result.Items = append(result.Items, <-ch)
}
if result.Items == nil {
result.Items = make([]interface{}, 0)
}
result.TotalCount = len(result.Items)
response.WriteAsJson(result)
}

View File

@@ -73,7 +73,8 @@ func handleAllPods(_ *restful.Request, response *restful.Response) {
// Get pods metrics in namespace
func handlePodsUnderNameSpace(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
result = metrics.GetPodMetricsInNamespace(request.PathParameter("namespace"))
labelSelector := request.QueryParameter("labelSelector")
result = metrics.GetPodMetricsInNamespace(request.PathParameter("namespace"), labelSelector)
response.WriteAsJson(result)
}

View File

@@ -54,6 +54,12 @@ func GetHeapsterMetricsJson(url string) *jason.Object {
}
}
// return an empty JSON object in case of an error response from heapster
if data == nil {
emptyJSON := `{}`
data, _ = jason.NewObjectFromBytes([]byte(emptyJSON))
}
return data
}

View File

@@ -27,8 +27,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
const inUse = "kubesphere.io/in_use_pods"
@@ -269,22 +267,6 @@ func (ctl *PodCtl) ListWithConditions(conditions string, paging *Paging) (int, i
listWithConditions(ctl.DB, &total, &object, &list, conditions, paging, order)
ch := make(chan metrics.PodMetrics)
for index, _ := range list {
go metrics.GetSinglePodMetrics(list[index].Namespace, list[index].Name, ch)
}
var resultMetrics = make(map[string]metrics.PodMetrics)
for range list {
podMetric := <-ch
resultMetrics[podMetric.PodName] = podMetric
}
for index, _ := range list {
list[index].Metrics = resultMetrics[list[index].Name]
}
return total, list, nil
}

View File

@@ -26,8 +26,6 @@ import (
"github.com/jinzhu/gorm"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
const (
@@ -179,17 +177,16 @@ func (Ingress) TableName() string {
}
type Pod struct {
Name string `gorm:"primary_key" json:"name"`
Namespace string `gorm:"primary_key" json:"namespace"`
Status string `json:"status,omitempty"`
Node string `json:"node,omitempty"`
NodeIp string `json:"nodeIp,omitempty"`
PodIp string `json:"podIp,omitempty"`
Containers Containers `gorm:"type:text" json:"containers,omitempty"`
Annotation Annotation `json:"annotations"`
RestartCount int `json:"restartCount"`
Metrics metrics.PodMetrics `json:"metrics,omitempty"`
CreateTime time.Time `gorm:"column:createTime" json:"createTime,omitempty"`
Name string `gorm:"primary_key" json:"name"`
Namespace string `gorm:"primary_key" json:"namespace"`
Status string `json:"status,omitempty"`
Node string `json:"node,omitempty"`
NodeIp string `json:"nodeIp,omitempty"`
PodIp string `json:"podIp,omitempty"`
Containers Containers `gorm:"type:text" json:"containers,omitempty"`
Annotation Annotation `json:"annotations"`
RestartCount int `json:"restartCount"`
CreateTime time.Time `gorm:"column:createTime" json:"createTime,omitempty"`
}
type Container struct {

View File

@@ -117,7 +117,10 @@ func FormatContainerMetrics(namespace, podName, containerName string) ContainerM
cpuUsageRate := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/usage_rate")
cpuUsageRateMetrics, err := cpuUsageRate.GetObjectArray("metrics")
if err == nil && len(cpuUsageRateMetrics) != 0 {
if err != nil {
glog.Error(err)
containerCPUMetrics = make([]ContainerCpuMetrics, 0)
} else {
for _, metric := range cpuUsageRateMetrics {
timestamp, _ := metric.GetString("timestamp")
cpuMetrics.TimeStamp = timestamp
@@ -132,7 +135,10 @@ func FormatContainerMetrics(namespace, podName, containerName string) ContainerM
memoryUsage := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/usage")
memoryUsageMetrics, err := memoryUsage.GetObjectArray("metrics")
if err == nil && len(memoryUsageMetrics) != 0 {
if err != nil {
glog.Error(err)
containerMemMetrics = make([]ContainerMemoryMetrics, 0)
} else {
for _, metric := range memoryUsageMetrics {
timestamp, _ := metric.GetString("timestamp")
memMetrics.TimeStamp = timestamp

View File

@@ -90,6 +90,7 @@ func GetNodes() []string {
err := dec.Decode(&nodes)
if err != nil {
glog.Error(err)
return make([]string, 0)
}
return nodes
}
@@ -116,7 +117,10 @@ func FormatNodeMetrics(nodeName string) NodeMetrics {
cpuUsageRate := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/cpu/usage_rate")
cpuUsageRateMetrics, err := cpuUsageRate.GetObjectArray("metrics")
if len(cpuUsageRateMetrics) != 0 {
if err != nil {
glog.Error(err)
nodeCPUMetrics = make([]NodeCpuMetrics, 0)
} else {
for _, metric := range cpuUsageRateMetrics {
timestamp, _ := metric.GetString("timestamp")
usedCpu, _ := metric.GetFloat64("value")
@@ -141,7 +145,10 @@ func FormatNodeMetrics(nodeName string) NodeMetrics {
memUsage := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/memory/working_set")
memUsageMetrics, err := memUsage.GetObjectArray("metrics")
if err == nil && len(memUsageMetrics) != 0 {
if err != nil {
glog.Error(err)
nodeMemMetrics = make([]NodeMemoryMetrics, 0)
} else {
for _, metric := range memUsageMetrics {
timestamp, _ := metric.GetString("timestamp")
usedMemoryBytes, err = metric.GetFloat64("value")

View File

@@ -73,11 +73,18 @@ func GetPodsMetrics(podList *coreV1.PodList) []PodMetrics {
return items
}
func GetPodMetricsInNamespace(namespace string) constants.PageableResponse {
func GetPodMetricsInNamespace(namespace string, labelSelector string) constants.PageableResponse {
var podMetrics constants.PageableResponse
k8sClient := client.NewK8sClient()
options := v1.ListOptions{}
var options v1.ListOptions
if len(labelSelector) > 0 {
options = v1.ListOptions{
LabelSelector: labelSelector,
}
} else {
options = v1.ListOptions{}
}
podList, _ := k8sClient.CoreV1().Pods(namespace).List(options)
for _, podMetric := range GetPodsMetrics(podList) {
@@ -275,6 +282,7 @@ func FormatPodMetrics(namespace string, pod string) PodMetrics {
memoryLimit := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + pod + "/metrics/memory/limit")
memoryLimitMetrics, err := memoryLimit.GetObjectArray("metrics")
if err != nil || len(memoryLimitMetrics) == 0 {
glog.Error(err)
resultPod.MemoryLimit = Inf
} else {
data, _ := memoryLimitMetrics[0].GetNumber("value")
@@ -284,34 +292,45 @@ func FormatPodMetrics(namespace string, pod string) PodMetrics {
}
cpuUsageRate := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + pod + "/metrics/cpu/usage_rate")
cpuUsageRateMetrics, _ := cpuUsageRate.GetObjectArray("metrics")
for _, cpuUsageRateMetric := range cpuUsageRateMetrics {
timestamp, _ := cpuUsageRateMetric.GetString("timestamp")
cpuUsageRate, _ := cpuUsageRateMetric.GetFloat64("value")
cpuMetrics.TimeStamp = timestamp
cpuMetrics.UsedCpu = fmt.Sprintf("%.1f", cpuUsageRate)
cpuUsageRateMetrics, err := cpuUsageRate.GetObjectArray("metrics")
if err != nil {
glog.Error(err)
resultPod.CPU = make([]PodCpuMetrics, 0)
} else {
for _, cpuUsageRateMetric := range cpuUsageRateMetrics {
timestamp, _ := cpuUsageRateMetric.GetString("timestamp")
cpuUsageRate, _ := cpuUsageRateMetric.GetFloat64("value")
cpuMetrics.TimeStamp = timestamp
cpuMetrics.UsedCpu = fmt.Sprintf("%.1f", cpuUsageRate)
podCPUMetrics = append(podCPUMetrics, cpuMetrics)
}
resultPod.CPU = podCPUMetrics
podCPUMetrics = append(podCPUMetrics, cpuMetrics)
}
resultPod.CPU = podCPUMetrics
memUsage := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + pod + "/metrics/memory/usage")
memoryUsageMetrics, err := memUsage.GetObjectArray("metrics")
for _, memoryUsageMetric := range memoryUsageMetrics {
timestamp, _ := memoryUsageMetric.GetString("timestamp")
memoryMetrics.TimeStamp = timestamp
usedMemoryBytes, err := memoryUsageMetric.GetFloat64("value")
if err == nil {
memoryMetrics.UsedMemory = fmt.Sprintf("%.1f", usedMemoryBytes/1024/1024)
} else {
memoryMetrics.UsedMemory = Inf
if err != nil {
glog.Error(err)
resultPod.Memory = make([]PodMemoryMetrics, 0)
} else {
for _, memoryUsageMetric := range memoryUsageMetrics {
timestamp, _ := memoryUsageMetric.GetString("timestamp")
memoryMetrics.TimeStamp = timestamp
usedMemoryBytes, err := memoryUsageMetric.GetFloat64("value")
if err == nil {
memoryMetrics.UsedMemory = fmt.Sprintf("%.1f", usedMemoryBytes/1024/1024)
} else {
memoryMetrics.UsedMemory = Inf
}
podMemMetrics = append(podMemMetrics, memoryMetrics)
}
podMemMetrics = append(podMemMetrics, memoryMetrics)
resultPod.Memory = podMemMetrics
}
resultPod.Memory = podMemMetrics
return resultPod
}