Merge branch 'master' into master

This commit is contained in:
richardxz
2018-06-20 11:26:01 +08:00
committed by GitHub
433 changed files with 32551 additions and 4234 deletions

View File

@@ -21,38 +21,41 @@ import (
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/filter/route"
"kubesphere.io/kubesphere/pkg/models"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
// {namespace} namespace name
// {node} node host name
// {pod} pod name
// {container} container name
func Register(ws *restful.WebService) {
ws.Route(ws.GET("/namespaces/{namespace}/pods/{podname}/containers/{containername}").To(handleContainerUnderNameSpaceAndPod).Filter(route.RouteLogging)).
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers/{container}").To(handleContainerUnderNameSpaceAndPod).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods/{podname}/containers").To(handleContainersUnderNameSpaceAndPod).Filter(route.RouteLogging)).
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers").To(handleContainersUnderNameSpaceAndPod).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{nodename}/namespaces/{namespace}/pods/{podname}/containers").To(handleContainersUnderNodeAndNameSpaceAndPod).Filter(route.RouteLogging)).
ws.Route(ws.GET("/nodes/{node}/namespaces/{namespace}/pods/{pod}/containers").To(handleContainersUnderNodeAndNameSpaceAndPod).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
}
func handleContainerUnderNameSpaceAndPod(request *restful.Request, response *restful.Response) {
var resultContainer models.ResultContainer
resultContainer = models.FormatContainerMetrics(request.PathParameter("namespace"), request.PathParameter("podname"), request.PathParameter("containername"))
resultContainer.NodeName = models.GetNodeNameForPod(request.PathParameter("podname"), request.PathParameter("namespace"))
var resultContainer metrics.ContainerMetrics
resultContainer = metrics.FormatContainerMetrics(request.PathParameter("namespace"), request.PathParameter("pod"), request.PathParameter("container"))
resultContainer.NodeName = metrics.GetNodeNameForPod(request.PathParameter("pod"), request.PathParameter("namespace"))
response.WriteAsJson(resultContainer)
}
func handleContainersUnderNameSpaceAndPod(request *restful.Request, response *restful.Response) {
var resultNameSpace constants.PageableResponse
resultNameSpace = models.FormatContainersMetrics("", request.PathParameter("namespace"), request.PathParameter("podname"))
resultNameSpace = metrics.FormatContainersMetrics("", request.PathParameter("namespace"), request.PathParameter("pod"))
response.WriteAsJson(resultNameSpace)
}
func handleContainersUnderNodeAndNameSpaceAndPod(request *restful.Request, response *restful.Response) {
var resultNameSpace constants.PageableResponse
resultNameSpace = models.FormatContainersMetrics(request.PathParameter("nodename"), request.PathParameter("namespace"), request.PathParameter("podname"))
resultNameSpace = metrics.FormatContainersMetrics(request.PathParameter("node"), request.PathParameter("namespace"), request.PathParameter("pod"))
response.WriteAsJson(resultNameSpace)
}

View File

@@ -23,7 +23,7 @@ import (
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/filter/route"
"kubesphere.io/kubesphere/pkg/models"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
func Register(ws *restful.WebService, subPath string) {
@@ -38,31 +38,41 @@ func Register(ws *restful.WebService, subPath string) {
ws.Route(ws.POST(subPath+"/{nodename}/drainage").To(handleDrainNode).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET(subPath+"/{nodename}/drainage").To(handleDrainStatus).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
}
func MakeRequest(node string, ch chan<- metrics.NodeMetrics) {
resultNode := metrics.FormatNodeMetrics(node)
ch <- resultNode
}
func handleNodes(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
var resultNode models.ResultNode
nodes := models.GetNodes()
nodes := metrics.GetNodes()
var total_count int
for i, node := range nodes {
resultNode = models.FormatNodeMetrics(node)
result.Items = append(result.Items, resultNode)
total_count = i
ch := make(chan metrics.NodeMetrics)
for _, node := range nodes {
go MakeRequest(node, ch)
}
total_count = total_count + 1
result.TotalCount = total_count
for _, _ = range nodes {
result.Items = append(result.Items, <-ch)
}
result.TotalCount = len(result.Items)
response.WriteAsJson(result)
}
func handleSingleNode(request *restful.Request, response *restful.Response) {
nodeName := request.PathParameter("nodename")
var resultNode models.ResultNode
var resultNode metrics.NodeMetrics
resultNode = models.FormatNodeMetrics(nodeName)
resultNode = metrics.FormatNodeMetrics(nodeName)
response.WriteAsJson(resultNode)
}
@@ -71,7 +81,7 @@ func handleDrainNode(request *restful.Request, response *restful.Response) {
nodeName := request.PathParameter("nodename")
result, err := models.DrainNode(nodeName)
result, err := metrics.DrainNode(nodeName)
if err != nil {
@@ -84,3 +94,20 @@ func handleDrainNode(request *restful.Request, response *restful.Response) {
}
}
func handleDrainStatus(request *restful.Request, response *restful.Response) {
nodeName := request.PathParameter("nodename")
result, err := metrics.DrainStatus(nodeName)
if err != nil {
response.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
} else {
response.WriteAsJson(result)
}
}

View File

@@ -22,7 +22,7 @@ import (
"kubesphere.io/kubesphere/pkg/filter/route"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/models"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
func Register(ws *restful.WebService) {
@@ -30,75 +30,48 @@ func Register(ws *restful.WebService) {
ws.Route(ws.GET("/pods").To(handleAllPods).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods/{podname}").To(handlePodUnderNameSpace).Filter(route.RouteLogging)).
ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}").To(handlePodUnderNameSpace).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/namespaces/{namespace}/pods").To(handlePodsUnderNameSpace).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{nodename}/pods").To(handlePodsUnderNode).Filter(route.RouteLogging)).
ws.Route(ws.GET("/nodes/{node}/pods").To(handlePodsUnderNode).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
ws.Route(ws.GET("/nodes/{nodename}/namespaces/{namespace}/pods").To(handlePodsUnderNodeAndNameSpace).Filter(route.RouteLogging)).
ws.Route(ws.GET("/nodes/{node}/namespaces/{namespace}/pods").To(handlePodsUnderNodeAndNameSpace).Filter(route.RouteLogging)).
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON)
}
func handleAllPods(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
namespaces := models.GetNameSpaces()
var total_count int
for i, namespace := range namespaces {
result = models.FormatPodsMetrics("", namespace)
result.Items = append(result.Items, result)
total_count = i
}
result.TotalCount = total_count
result = metrics.GetAllPodMetrics()
response.WriteAsJson(result)
}
func handlePodsUnderNameSpace(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
result = models.FormatPodsMetrics("", request.PathParameter("namespace"))
result = metrics.GetPodMetricsInNamespace(request.PathParameter("namespace"))
response.WriteAsJson(result)
}
func handlePodsUnderNode(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
var resultNameSpace constants.PageableResponse
namespaces := models.GetNameSpaces()
var total_count int
for _, namespace := range namespaces {
resultNameSpace = models.FormatPodsMetrics(request.PathParameter("nodename"), namespace)
var sub_total_count int
for j, pod := range resultNameSpace.Items {
result.Items = append(result.Items, pod)
sub_total_count = j
}
total_count += sub_total_count
}
result.TotalCount = total_count
result = metrics.GetPodMetricsInNode(request.PathParameter("node"))
response.WriteAsJson(result)
}
func handlePodUnderNameSpace(request *restful.Request, response *restful.Response) {
var resultPod models.ResultPod
resultPod = models.FormatPodMetrics(request.PathParameter("namespace"), request.PathParameter("podname"))
var resultPod metrics.PodMetrics
resultPod = metrics.FormatPodMetrics(request.PathParameter("namespace"), request.PathParameter("pod"))
response.WriteAsJson(resultPod)
}
func handlePodsUnderNodeAndNameSpace(request *restful.Request, response *restful.Response) {
var result constants.PageableResponse
result = models.FormatPodsMetrics(request.PathParameter("nodename"), request.PathParameter("namespace"))
nodeName := request.PathParameter("node")
namespace := request.PathParameter("namespace")
result = metrics.GetPodMetricsInNamespaceOfNode(namespace, nodeName)
response.WriteAsJson(result)
}

View File

@@ -42,5 +42,6 @@ func listResource(req *restful.Request, resp *restful.Response) {
resp.WriteHeaderAndEntity(http.StatusInternalServerError, constants.MessageResponse{Message: err.Error()})
return
}
resp.WriteEntity(res)
}

View File

@@ -112,7 +112,6 @@ func (server *kubeSphereServer) run() {
}
func Run() {
server := newKubeSphereServer(options.ServerOptions)
server.run()

View File

@@ -20,27 +20,48 @@ import (
"net/http"
"io/ioutil"
"os"
"time"
"github.com/antonholmquist/jason"
"github.com/golang/glog"
)
const (
DefaultHeapsterScheme = "http"
DefaultHeapsterService = "heapster" //"heapster"
DefaultHeapsterPort = "80" // use the first exposed port on the service
DefaultHeapsterService = "192.168.0.34" //"heapster"
DefaultHeapsterPort = "31082" // use the first exposed port on the service
HeapsterApiPath = "/api/v1/model"
HeapsterEndpointUrl = DefaultHeapsterScheme + "://" + DefaultHeapsterService + ":" + DefaultHeapsterPort + HeapsterApiPath
)
var (
prefix = "/api/v1/model"
)
var httpClient = &http.Client{Timeout: 30 * time.Second}
// Get heapster response in python-like dictionary
func GetHeapsterMetricsJson(url string) *jason.Object {
response, err := httpClient.Get(HeapsterEndpointUrl + url)
var data *jason.Object
if err != nil {
glog.Error(url, err)
} else {
defer response.Body.Close()
data, err = jason.NewObjectFromReader(response.Body)
if err != nil {
glog.Error(url, err)
}
}
return data
}
func GetHeapsterMetrics(url string) string {
//glog.Info("Querying data from " + DefaultHeapsterScheme + "://" + DefaultHeapsterService + ":" + DefaultHeapsterPort + prefix + url)
response, err := http.Get(DefaultHeapsterScheme + "://" + DefaultHeapsterService + ":" + DefaultHeapsterPort + prefix + url)
response, err := httpClient.Get(HeapsterEndpointUrl + url)
if err != nil {
glog.Error(err)
os.Exit(1)
} else {
defer response.Body.Close()
@@ -48,7 +69,6 @@ func GetHeapsterMetrics(url string) string {
if err != nil {
glog.Error(err)
os.Exit(1)
}
return string(contents)
@@ -57,10 +77,9 @@ func GetHeapsterMetrics(url string) string {
}
func GetCAdvisorMetrics(nodeAddr string) string {
response, err := http.Get("http://" + nodeAddr + ":10255/stats/summary")
response, err := httpClient.Get("http://" + nodeAddr + ":10255/stats/summary")
if err != nil {
glog.Error(err)
os.Exit(1)
} else {
defer response.Body.Close()
@@ -68,7 +87,6 @@ func GetCAdvisorMetrics(nodeAddr string) string {
if err != nil {
glog.Error(err)
os.Exit(1)
}
return string(contents)

View File

@@ -26,9 +26,12 @@ type PageableResponse struct {
}
const (
APIVERSION = "v1alpha1"
KIND = "kubesphere"
NameSpace = "kubesphere"
DATA_HOME = "/etc/kubesphere"
INGRESS_CONTROLLER_FOLDER = DATA_HOME + "/ingress-controller"
APIVERSION = "v1alpha1"
KIND = "kubesphere"
NameSpace = "kubesphere"
DataHome = "/etc/kubesphere"
IngressControllerFolder = DataHome + "/ingress-controller"
IngressControllerNamespace = "kubesphere-router-system"
IngressControllerPrefix = "kubesphere-router-"
)

View File

@@ -43,7 +43,7 @@ type Components struct {
SelfLink string `json:"selfLink"`
Label interface{} `json:"label"`
HealthStatus string `json:"healthStatus"`
CreateTime time.Time `json:"updateTime"`
CreateTime time.Time `json:"createTime"`
}
/***
@@ -173,7 +173,6 @@ func GetComponentsByNamespace(ns string) ([]Components, error) {
if ns != KUBESYSTEM {
option.LabelSelector = ""
}
servicelists, err := k8sClient.CoreV1().Services(ns).List(option)
if err != nil {

View File

@@ -1,151 +0,0 @@
package models
import (
"encoding/json"
"strings"
"github.com/golang/glog"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
ksutil "kubesphere.io/kubesphere/pkg/util"
"fmt"
"strconv"
)
type ResultContainer struct {
NodeName string `json:"node_name"`
ContainerName string `json:"container_name"`
CPURequest string `json:"cpu_request"`
CPULimit string `json:"cpu_limit"`
MemoryRequest string `json:"mem_request"`
MemoryLimit string `json:"mem_limit"`
CPU []CPUContainer `json:"cpu"`
Memory []MemoryContainer `json:"memory"`
}
type CPUContainer struct {
TimeStamp string `json:"timestamp"`
UsedCPU string `json:"used_cpu"`
CPUUtilization string `json:"cpu_utilization"`
}
type MemoryContainer struct {
TimeStamp string `json:"timestamp"`
UsedMemory string `json:"used_mem"`
MemoryUtilization string `json:"mem_utilization"`
}
/*
Get all containers under specified namespace in default cluster
*/
func GetContainers(namespace, podName string) []string {
containersList := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers")
var containers []string
dec := json.NewDecoder(strings.NewReader(containersList))
err := dec.Decode(&containers)
if err != nil {
glog.Error(err)
}
return containers
}
func FormatContainersMetrics(nodeName, namespace, podName string) constants.PageableResponse {
var result constants.PageableResponse
var resultContainer ResultContainer
var containers []string
var total_count int
containers = GetContainers(namespace, podName)
for i, container := range containers {
resultContainer = FormatContainerMetrics(namespace, podName, container)
if nodeName != "" {
resultContainer.NodeName = nodeName
} else {
resultContainer.NodeName = GetNodeNameForPod(podName, namespace)
}
result.Items = append(result.Items, resultContainer)
total_count = i
}
result.TotalCount = total_count + 1
return result
}
func FormatContainerMetrics(namespace, podName, containerName string) ResultContainer {
var resultContainer ResultContainer
var containerCPUMetrics []CPUContainer
var containerMemMetrics []MemoryContainer
var cpuMetrics CPUContainer
var memMetrics MemoryContainer
resultContainer.ContainerName = containerName
cpuRequest := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/request")
cpuRequest = ksutil.JsonRawMessage(cpuRequest).Find("metrics").ToList()[0].Find("value").ToString()
if cpuRequest != "" && cpuRequest != "0" {
resultContainer.CPURequest = cpuRequest
} else {
resultContainer.CPURequest = "inf"
}
cpuLimit := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/limit")
cpuLimit = ksutil.JsonRawMessage(cpuLimit).Find("metrics").ToList()[0].Find("value").ToString()
if cpuLimit != "" && cpuLimit != "0" {
resultContainer.CPULimit = cpuLimit
} else {
resultContainer.CPULimit = "inf"
}
memoryRequest := ksutil.JsonRawMessage(client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/request")).Find("metrics").ToList()[0].Find("value").ToString()
resultContainer.MemoryRequest = ConvertMemory(memoryRequest)
memoryLimit := ksutil.JsonRawMessage(client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/limit")).Find("metrics").ToList()[0].Find("value").ToString()
resultContainer.MemoryLimit = ConvertMemory(memoryLimit)
cpuUsageRate := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/usage_rate")
if cpuUsageRate != "" {
metrics := ksutil.JsonRawMessage(cpuUsageRate).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
cpu_utilization, _ := strconv.ParseFloat(ConvertCPUUsageRate(metric.Find("value").ToString()), 64)
cpuMetrics.TimeStamp = timestamp.ToString()
cpuMetrics.CPUUtilization = fmt.Sprintf("%.3f", cpu_utilization)
if resultContainer.CPULimit != "inf" {
cpu_limit, _ := strconv.ParseFloat(resultContainer.CPULimit, 64)
cpuMetrics.UsedCPU = fmt.Sprintf("%.1f", cpu_limit*cpu_utilization)
} else {
cpuMetrics.UsedCPU = "inf"
}
glog.Info("pod " + podName + " has limit cpu " + resultContainer.CPULimit + " CPU utilization " + fmt.Sprintf("%.3f", cpu_utilization) + " at time" + timestamp.ToString())
containerCPUMetrics = append(containerCPUMetrics, cpuMetrics)
}
}
resultContainer.CPU = containerCPUMetrics
var used_mem_bytes float64
memUsage := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/usage")
if memUsage != "" {
metrics := ksutil.JsonRawMessage(memUsage).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
used_mem_bytes, _ = strconv.ParseFloat(metric.Find("value").ToString(), 64)
used_mem := used_mem_bytes / 1024 / 1024
memMetrics.TimeStamp = timestamp.ToString()
memMetrics.UsedMemory = fmt.Sprintf("%.1f", used_mem)
memMetrics.MemoryUtilization = fmt.Sprintf("%.3f", CalculateMemoryUsage(memoryRequest, memoryLimit, metric.Find("value").ToString()))
glog.Info("pod " + podName + " has limit mem " + resultContainer.MemoryLimit + " mem utilization " + memMetrics.MemoryUtilization + " at time" + timestamp.ToString())
containerMemMetrics = append(containerMemMetrics, memMetrics)
}
}
resultContainer.Memory = containerMemMetrics
return resultContainer
}

View File

@@ -27,6 +27,8 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
const inUse = "in_use_pods"
@@ -262,6 +264,22 @@ func (ctl *PodCtl) ListWithConditions(conditions string, paging *Paging) (int, i
listWithConditions(ctl.DB, &total, &object, &list, conditions, paging, order)
ch := make(chan metrics.PodMetrics)
for index, _ := range list {
go metrics.GetSinglePodMetrics(list[index].Namespace, list[index].Name, ch)
}
var resultMetrics = make(map[string]metrics.PodMetrics)
for range list {
podMetric := <-ch
resultMetrics[podMetric.PodName] = podMetric
}
for index, _ := range list {
list[index].Metrics = resultMetrics[list[index].Name]
}
return total, list, nil
}

View File

@@ -26,6 +26,8 @@ import (
"github.com/jinzhu/gorm"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"kubesphere.io/kubesphere/pkg/models/metrics"
)
const (

View File

@@ -0,0 +1,148 @@
package metrics
import (
"encoding/json"
"strings"
"github.com/golang/glog"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
"fmt"
)
// ContainerMetrics is the API payload describing one container's resource
// requests/limits plus its CPU and memory usage time series.
type ContainerMetrics struct {
	// Name of the node the container's pod runs on; filled in by callers of
	// FormatContainerMetrics.
	NodeName      string `json:"node_name"`
	ContainerName string `json:"container_name"`
	// Request/limit values are formatted strings; the sentinel Inf is used
	// when heapster reports no value (see FormatContainerMetrics).
	CpuRequest    string `json:"cpu_request"`
	CpuLimit      string `json:"cpu_limit"`
	MemoryRequest string `json:"mem_request"`
	MemoryLimit   string `json:"mem_limit"`
	// Time-ordered usage samples as returned by heapster.
	Cpu    []ContainerCpuMetrics    `json:"cpu"`
	Memory []ContainerMemoryMetrics `json:"memory"`
}

// ContainerCpuMetrics is a single CPU usage sample.
type ContainerCpuMetrics struct {
	TimeStamp string `json:"timestamp"`
	UsedCpu   string `json:"used_cpu"`
}

// ContainerMemoryMetrics is a single memory usage sample (UsedMemory is in
// MiB; see FormatContainerMetrics).
type ContainerMemoryMetrics struct {
	TimeStamp  string `json:"timestamp"`
	UsedMemory string `json:"used_mem"`
}
/*
Get all containers under specified namespace in default cluster
*/
// GetContainers lists the container names that heapster reports for the
// given pod in the default cluster. On a decode failure the error is logged
// and a nil slice is returned.
func GetContainers(namespace, podName string) []string {
	var containers []string
	payload := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + podName + "/containers")
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&containers); err != nil {
		glog.Error(err)
	}
	return containers
}
// FormatContainersMetrics collects per-container metrics for every container
// of the given pod and wraps them in a pageable response.
// When nodeName is non-empty it is used as-is for each container's NodeName;
// otherwise the node is looked up from the pod.
func FormatContainersMetrics(nodeName, namespace, podName string) constants.PageableResponse {
	var result constants.PageableResponse

	for _, container := range GetContainers(namespace, podName) {
		containerMetrics := FormatContainerMetrics(namespace, podName, container)
		if nodeName != "" {
			containerMetrics.NodeName = nodeName
		} else {
			containerMetrics.NodeName = GetNodeNameForPod(podName, namespace)
		}
		result.Items = append(result.Items, containerMetrics)
	}
	// BUG FIX: the original computed total_count = lastIndex + 1, which
	// reports TotalCount = 1 for a pod with zero containers. Count the items
	// actually collected instead.
	result.TotalCount = len(result.Items)
	return result
}
// FormatContainerMetrics queries heapster for a single container's resource
// requests, limits and usage series and assembles them into a
// ContainerMetrics value. NodeName is left empty; callers fill it in.
// Each query tolerates a missing/empty "metrics" array: requests and limits
// fall back to the Inf sentinel, usage series stay empty.
func FormatContainerMetrics(namespace, podName, containerName string) ContainerMetrics {
	var resultContainer ContainerMetrics
	var containerCPUMetrics []ContainerCpuMetrics
	var containerMemMetrics []ContainerMemoryMetrics
	var cpuMetrics ContainerCpuMetrics
	var memMetrics ContainerMemoryMetrics
	resultContainer.ContainerName = containerName
	// CPU request: first sample of the series, or Inf when absent.
	// Per-value Get errors are deliberately ignored (zero value is used).
	cpuRequest := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/request")
	cpuRequestMetrics, err := cpuRequest.GetObjectArray("metrics")
	if err == nil && len(cpuRequestMetrics) != 0 {
		requestCpu, _ := cpuRequestMetrics[0].GetFloat64("value")
		resultContainer.CpuRequest = fmt.Sprintf("%.1f", requestCpu)
	} else {
		resultContainer.CpuRequest = Inf
	}
	// CPU limit: same shape as the request query.
	cpuLimit := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/limit")
	cpuLimitMetrics, err := cpuLimit.GetObjectArray("metrics")
	if err == nil && len(cpuLimitMetrics) != 0 {
		limitCpu, _ := cpuLimitMetrics[0].GetFloat64("value")
		resultContainer.CpuLimit = fmt.Sprintf("%.1f", limitCpu)
	} else {
		resultContainer.CpuLimit = Inf
	}
	// Memory request/limit: heapster values appear to be raw bytes formatted
	// as-is here (no unit conversion) — NOTE(review): confirm the intended
	// unit against the API consumers.
	memoryRequst := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/request")
	memoryRequstMetrics, err := memoryRequst.GetObjectArray("metrics")
	if err == nil && len(memoryRequstMetrics) != 0 {
		requestMemory, _ := memoryRequstMetrics[0].GetFloat64("value")
		resultContainer.MemoryRequest = fmt.Sprintf("%.1f", requestMemory)
	} else {
		resultContainer.MemoryRequest = Inf
	}
	memoryLimit := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/limit")
	memoryLimitMetrics, err := memoryLimit.GetObjectArray("metrics")
	if err == nil && len(memoryLimitMetrics) != 0 {
		limitMemory, _ := memoryLimitMetrics[0].GetFloat64("value")
		resultContainer.MemoryLimit = fmt.Sprintf("%.1f", limitMemory)
	} else {
		resultContainer.MemoryLimit = Inf
	}
	// CPU usage series: one ContainerCpuMetrics per heapster sample.
	cpuUsageRate := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/cpu/usage_rate")
	cpuUsageRateMetrics, err := cpuUsageRate.GetObjectArray("metrics")
	if err == nil && len(cpuUsageRateMetrics) != 0 {
		for _, metric := range cpuUsageRateMetrics {
			timestamp, _ := metric.GetString("timestamp")
			cpuMetrics.TimeStamp = timestamp
			usedCpu, _ := metric.GetFloat64("value")
			cpuMetrics.UsedCpu = fmt.Sprintf("%.1f", usedCpu)
			containerCPUMetrics = append(containerCPUMetrics, cpuMetrics)
		}
	}
	resultContainer.Cpu = containerCPUMetrics
	// Memory usage series: values are bytes, converted to MiB.
	memoryUsage := client.GetHeapsterMetricsJson("/namespaces/" + namespace + "/pods/" + podName + "/containers/" + containerName + "/metrics/memory/usage")
	memoryUsageMetrics, err := memoryUsage.GetObjectArray("metrics")
	if err == nil && len(memoryUsageMetrics) != 0 {
		for _, metric := range memoryUsageMetrics {
			timestamp, _ := metric.GetString("timestamp")
			memMetrics.TimeStamp = timestamp
			usedMemoryBytes, _ := metric.GetFloat64("value")
			memMetrics.UsedMemory = fmt.Sprintf("%.1f", usedMemoryBytes/1024/1024)
			containerMemMetrics = append(containerMemMetrics, memMetrics)
		}
	}
	resultContainer.Memory = containerMemMetrics
	return resultContainer
}

485
pkg/models/metrics/nodes.go Normal file
View File

@@ -0,0 +1,485 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"kubesphere.io/kubesphere/pkg/client"
kubeclient "kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
ksutil "kubesphere.io/kubesphere/pkg/util"
)
// Node condition types inspected by getNodeStatus. The comment on each
// constant records the condition status value of a healthy node.
const (
	// healthy when status is "False"
	OutOfDisk = "OutOfDisk"
	// healthy when status is "False"
	MemoryPressure = "MemoryPressure"
	// healthy when status is "False"
	DiskPressure = "DiskPressure"
	// healthy when status is "False"
	PIDPressure = "PIDPressure"
	// healthy when status is "True"
	KubeletReady = "Ready"
)

// GracePeriods is the pod eviction grace period, in seconds, used when
// draining a node.
const GracePeriods = 900
// NodeMetrics aggregates a node's status, pod counts, filesystem usage and
// CPU/memory time series for the API layer.
type NodeMetrics struct {
	NodeName string `json:"node_name"`
	// "Ready" or "NotReady"; see getNodeStatus.
	NodeStatus string `json:"node_status"`
	// Current number of pods on the node and the node's pod capacity,
	// both formatted as strings.
	PodsCount    string `json:"pods_count"`
	PodsCapacity string `json:"pods_capacity"`
	// Root filesystem usage in GiB plus a utilization ratio; empty strings
	// when cAdvisor stats are unavailable (see getNodeFileSystemStatus).
	UsedFS        string `json:"used_fs"`
	TotalFS       string `json:"total_fs"`
	FSUtilization string `json:"fs_utilization"`
	// Time-ordered usage samples as returned by heapster.
	CPU    []NodeCpuMetrics    `json:"cpu"`
	Memory []NodeMemoryMetrics `json:"memory"`
}

// NodeCpuMetrics is a single node CPU sample (cores, utilization in percent).
type NodeCpuMetrics struct {
	TimeStamp      string `json:"timestamp"`
	UsedCPU        string `json:"used_cpu"`
	TotalCPU       string `json:"total_cpu"`
	CPUUtilization string `json:"cpu_utilization"`
}

// NodeMemoryMetrics is a single node memory sample (GiB, utilization in
// percent).
type NodeMemoryMetrics struct {
	TimeStamp         string `json:"timestamp"`
	UsedMemory        string `json:"used_mem"`
	TotalMemory       string `json:"total_mem"`
	MemoryUtilization string `json:"mem_utilization"`
}
/*
Get all nodes in default cluster
*/
// GetNodes returns the names of all nodes heapster knows about in the
// default cluster. On a decode failure the error is logged and a nil slice
// is returned.
func GetNodes() []string {
	var nodes []string
	payload := client.GetHeapsterMetrics("/nodes")
	if err := json.NewDecoder(strings.NewReader(payload)).Decode(&nodes); err != nil {
		glog.Error(err)
	}
	return nodes
}
/*
Format cpu/memory data for specified node
*/
// FormatNodeMetrics queries heapster and the k8s API for one node and
// assembles status, pod counts, filesystem usage and CPU/memory series
// into a NodeMetrics value.
func FormatNodeMetrics(nodeName string) NodeMetrics {
	var resultNode NodeMetrics
	var nodeCPUMetrics []NodeCpuMetrics
	var nodeMemMetrics []NodeMemoryMetrics
	var cpuMetrics NodeCpuMetrics
	var memMetrics NodeMemoryMetrics

	// Allocatable CPU (millicores); stays 0 when heapster has no data.
	var totalCpu float64
	cpuNodeAllocated := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/cpu/node_allocatable")
	cpuNodeAllocatedMetrics, err := cpuNodeAllocated.GetObjectArray("metrics")
	if err == nil && len(cpuNodeAllocatedMetrics) != 0 {
		totalCpu, err = cpuNodeAllocatedMetrics[0].GetFloat64("value")
		if err != nil {
			glog.Error(err)
		}
	}
	// CPU usage series: millicores converted to cores, utilization in percent.
	cpuUsageRate := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/cpu/usage_rate")
	cpuUsageRateMetrics, err := cpuUsageRate.GetObjectArray("metrics")
	// CONSISTENCY FIX: the original checked only len(...) here while every
	// sibling query also checks err == nil; guard the same way.
	if err == nil && len(cpuUsageRateMetrics) != 0 {
		for _, metric := range cpuUsageRateMetrics {
			timestamp, _ := metric.GetString("timestamp")
			usedCpu, _ := metric.GetFloat64("value")
			cpuMetrics.TimeStamp = timestamp
			cpuMetrics.TotalCPU = fmt.Sprintf("%.1f", totalCpu/1000)
			cpuMetrics.CPUUtilization = fmt.Sprintf("%.3f", usedCpu/totalCpu*100)
			cpuMetrics.UsedCPU = fmt.Sprintf("%.1f", usedCpu/1000)
			nodeCPUMetrics = append(nodeCPUMetrics, cpuMetrics)
		}
	}
	// Allocatable memory (bytes) and memory working-set series (GiB).
	memNodeAllocated := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/memory/node_allocatable")
	memNodeAllocatedMetrics, err := memNodeAllocated.GetObjectArray("metrics")
	var totalMemoryBytes, usedMemoryBytes float64
	if err == nil && len(memNodeAllocatedMetrics) != 0 {
		totalMemoryBytes, err = memNodeAllocatedMetrics[0].GetFloat64("value")
		if err != nil {
			glog.Error(err)
		}
	}
	memUsage := client.GetHeapsterMetricsJson("/nodes/" + nodeName + "/metrics/memory/working_set")
	memUsageMetrics, err := memUsage.GetObjectArray("metrics")
	if err == nil && len(memUsageMetrics) != 0 {
		for _, metric := range memUsageMetrics {
			timestamp, _ := metric.GetString("timestamp")
			usedMemoryBytes, err = metric.GetFloat64("value")
			memMetrics.TimeStamp = timestamp
			memMetrics.TotalMemory = fmt.Sprintf("%.1f", totalMemoryBytes/1024/1024/1024)
			memMetrics.UsedMemory = fmt.Sprintf("%.1f", usedMemoryBytes/1024/1024/1024)
			memMetrics.MemoryUtilization = fmt.Sprintf("%.3f", usedMemoryBytes/totalMemoryBytes*100)
			nodeMemMetrics = append(nodeMemMetrics, memMetrics)
		}
	}
	// Node-level facts from the k8s API object.
	resultNode.NodeName = nodeName
	resultNode.PodsCount = strconv.Itoa(GetPodCountOnNode(nodeName))
	nodeResObj := getNodeResObj(nodeName)
	resultNode.PodsCapacity = nodeResObj.Status.Capacity.Pods().String()
	resultNode.NodeStatus = getNodeStatus(nodeResObj)
	resultNode.UsedFS, resultNode.TotalFS, resultNode.FSUtilization = getNodeFileSystemStatus(nodeResObj)
	resultNode.CPU = nodeCPUMetrics
	resultNode.Memory = nodeMemMetrics
	return resultNode
}
// GetPodCountOnNode returns the number of pods scheduled onto nodeName
// across all namespaces, or 0 when the pod list cannot be retrieved
// (the error is logged).
func GetPodCountOnNode(nodeName string) int {
	cli := client.NewK8sClient()
	listOptions := metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodeName,
	}
	pods, err := cli.CoreV1().Pods("").List(listOptions)
	if err != nil {
		glog.Error(err)
		return 0
	}
	return len(pods.Items)
}
// getNodeResObj fetches the node API object by name; it returns nil when
// the lookup fails (the error is logged).
func getNodeResObj(nodeName string) *v1.Node {
	k8s := client.NewK8sClient()
	resObj, err := k8s.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		glog.Error(err)
		return nil
	}
	return resObj
}
// getNodeStatus derives a coarse "Ready"/"NotReady" status from the node's
// condition list: any disk/memory/PID pressure or out-of-disk condition
// reported "True", or a kubelet Ready condition reported "False", marks the
// node NotReady.
// FIX: the original if-chain omitted the break after the MemoryPressure
// match (every sibling branch had one); the switch with an early return
// makes all conditions terminate uniformly, with identical results.
func getNodeStatus(node *v1.Node) string {
	for _, cond := range node.Status.Conditions {
		switch cond.Type {
		case OutOfDisk, MemoryPressure, DiskPressure, PIDPressure:
			if cond.Status == "True" {
				return "NotReady"
			}
		case KubeletReady:
			if cond.Status == "False" {
				return "NotReady"
			}
		}
	}
	return "Ready"
}
// getNodeFileSystemStatus returns the node's root filesystem usage as
// (usedGiB, capacityGiB, utilizationRatio) strings, or three empty strings
// when cAdvisor stats are unavailable.
// NOTE(review): the node IP is read from the
// alpha.kubernetes.io/provided-node-ip annotation — confirm it is populated
// on all supported node setups.
func getNodeFileSystemStatus(node *v1.Node) (string, string, string) {
	nodeMetricsAsStr := client.GetCAdvisorMetrics(node.Annotations["alpha.kubernetes.io/provided-node-ip"])
	if nodeMetricsAsStr != "" {
		// Parse errors are ignored; a failed parse yields 0.
		usedBytesAsStr, _ := strconv.ParseFloat(ksutil.JsonRawMessage(nodeMetricsAsStr).Find("node").Find("fs").Find("usedBytes").ToString(), 64)
		capacityBytesAsStr, _ := strconv.ParseFloat(ksutil.JsonRawMessage(nodeMetricsAsStr).Find("node").Find("fs").Find("capacityBytes").ToString(), 64)
		return fmt.Sprintf("%.1f", usedBytesAsStr/1024/1024/1024), fmt.Sprintf("%.1f", capacityBytesAsStr/1024/1024/1024), fmt.Sprintf("%.3f", usedBytesAsStr/capacityBytesAsStr)
	}
	return "", "", ""
}
// DrainNode cordons the node (marks it unschedulable) so no new pods are
// scheduled onto it. If the node is already unschedulable, it only reports
// that the node has been drained.
func DrainNode(nodename string) (msg constants.MessageResponse, err error) {
	k8sclient := kubeclient.NewK8sClient()
	node, err := k8sclient.CoreV1().Nodes().Get(nodename, metav1.GetOptions{})
	if err != nil {
		// BUG FIX: glog.Fatal calls os.Exit, killing the whole server on a
		// transient API error and making the return below unreachable.
		glog.Error(err)
		return msg, err
	}
	if node.Spec.Unschedulable {
		glog.Info(node.Spec.Unschedulable)
		msg.Message = fmt.Sprintf("node %s have been drained", nodename)
		return msg, nil
	}
	// Strategic-merge patch flipping spec.unschedulable on.
	data := []byte(" {\"spec\":{\"unschedulable\":true}}")
	nodestatus, err := k8sclient.CoreV1().Nodes().Patch(nodename, types.StrategicMergePatchType, data)
	glog.Info(nodestatus)
	if err != nil {
		// BUG FIX: same glog.Fatal -> glog.Error as above.
		glog.Error(err)
		return msg, err
	}
	msg.Message = "success"
	return msg, nil
}
// DrainStatus evicts all evictable pods from the node. Daemonset-managed,
// static and mirror pods are skipped. Evictions are given up to 900s to
// complete. Eviction failures are reported via msg.Message with a nil error
// (preserving the original API behavior); only list failures return err.
func DrainStatus(nodename string) (msg constants.MessageResponse, err error) {
	k8sclient := kubeclient.NewK8sClient()
	podList, err := k8sclient.CoreV1().Pods("").List(metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodename,
	})
	if err != nil {
		// glog.Fatal here would kill the server; log and return instead.
		glog.Error(err)
		return msg, err
	}
	daemonsetList, err := k8sclient.AppsV1beta2().DaemonSets("").List(metav1.ListOptions{})
	if err != nil {
		glog.Error(err)
		return msg, err
	}
	// Filter out pods that must not be evicted: daemonset pods would be
	// recreated immediately, static/mirror pods are owned by the kubelet.
	pods := make([]v1.Pod, 0, len(podList.Items))
	for _, pod := range podList.Items {
		if containDaemonset(pod, *daemonsetList) {
			continue
		}
		if isStaticPod(&pod) || isMirrorPod(&pod) {
			continue
		}
		pods = append(pods, pod)
	}
	if len(pods) == 0 {
		msg.Message = "success"
		return msg, nil
	}
	// Poll function used by evictPods to observe pod termination.
	getPodFn := func(namespace, name string) (*v1.Pod, error) {
		return kubeclient.NewK8sClient().CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
	}
	if evicerr := evictPods(pods, 900, getPodFn); evicerr != nil {
		glog.Info(evicerr)
		msg.Message = evicerr.Error()
		return msg, nil
	}
	msg.Message = "success"
	return msg, nil
}
// getPodSource returns the config source of the pod (the value of the
// "kubernetes.io/config.source" annotation, e.g. "api" or "file"), or an
// error when the annotation is absent.
func getPodSource(pod *v1.Pod) (string, error) {
	// Indexing a nil map is safe in Go, so no separate nil check is needed.
	source, ok := pod.Annotations["kubernetes.io/config.source"]
	if !ok {
		return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
	}
	return source, nil
}
// isStaticPod reports whether the pod was created from a non-API config
// source, i.e. a static pod managed directly by the kubelet.
func isStaticPod(pod *v1.Pod) bool {
	if source, err := getPodSource(pod); err == nil {
		return source != "api"
	}
	return false
}
// isMirrorPod reports whether the pod is the API-server mirror of a static
// pod, identified by the presence of the mirror-pod annotation.
func isMirrorPod(pod *v1.Pod) bool {
	if pod.Annotations == nil {
		return false
	}
	_, mirrored := pod.Annotations[v1.MirrorPodAnnotationKey]
	return mirrored
}
// containDaemonset reports whether the pod appears to belong to any
// daemonset in the list.
// NOTE(review): matching by name substring is a heuristic — any pod whose
// name happens to contain a daemonset name would match; checking
// pod.OwnerReferences would be exact.
func containDaemonset(pod v1.Pod, daemonsetList v1beta2.DaemonSetList) bool {
	for _, daemonset := range daemonsetList.Items {
		if strings.Contains(pod.Name, daemonset.Name) {
			return true // early return instead of scanning the full list
		}
	}
	return false
}
// evictPod issues a policy/v1beta1 Eviction for the pod via the eviction
// subresource (so PodDisruptionBudgets are honored). A non-negative
// GracePeriodSeconds is passed through in the delete options.
func evictPod(pod v1.Pod, GracePeriodSeconds int) error {
	k8sclient := kubeclient.NewK8sClient()
	deleteOptions := &metav1.DeleteOptions{}
	if GracePeriodSeconds >= 0 {
		gracePeriodSeconds := int64(GracePeriodSeconds)
		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
	}
	var eviction policy.Eviction
	eviction.Kind = "Eviction"
	eviction.APIVersion = "policy/v1beta1"
	eviction.Namespace = pod.Namespace
	eviction.Name = pod.Name
	eviction.DeleteOptions = deleteOptions
	// Return the Evict error directly instead of the redundant
	// if err != nil { return err }; return nil dance.
	return k8sclient.CoreV1().Pods(pod.Namespace).Evict(&eviction)
}
// evictPods evicts the given pods concurrently and waits for each to be
// deleted. GracePeriodSeconds doubles as the overall drain timeout in
// seconds (0 means wait forever). getPodFn is used to poll pod existence.
// Returns the first eviction/wait error, or nil when all pods are gone.
func evictPods(pods []v1.Pod, GracePeriodSeconds int, getPodFn func(namespace, name string) (*v1.Pod, error)) error {
	doneCh := make(chan bool, len(pods))
	errCh := make(chan error, 1)
	for _, pod := range pods {
		// pod is passed by value so each goroutine gets its own copy
		// (pre-Go-1.22 loop-variable capture).
		go func(pod v1.Pod) {
			for {
				err := evictPod(pod, GracePeriodSeconds)
				if err == nil {
					break
				} else if apierrors.IsNotFound(err) {
					// Pod already gone — count it as done.
					doneCh <- true
					glog.Info(fmt.Sprintf("pod %s evict", pod.Name))
					return
				} else if apierrors.IsTooManyRequests(err) {
					// Eviction refused (e.g. a PodDisruptionBudget); retry.
					time.Sleep(5 * time.Second)
				} else {
					errCh <- fmt.Errorf("error when evicting pod %q: %v", pod.Name, err)
					return
				}
			}
			// Eviction accepted — wait (no per-pod bound) for actual deletion.
			if _, err := waitForDelete([]v1.Pod{pod}, time.Second, time.Duration(math.MaxInt64), getPodFn); err == nil {
				doneCh <- true
				glog.Info(fmt.Sprintf("pod %s delete", pod.Name))
			} else {
				errCh <- fmt.Errorf("error when waiting for pod %q terminating: %v", pod.Name, err)
			}
		}(pod)
	}
	// 0 timeout means infinite; MaxInt64 represents it.
	// NOTE(review): the original computed `GracePeriods * power(10, 9)` where
	// GracePeriods is not declared anywhere in this file; the apparent intent
	// is GracePeriodSeconds expressed in nanoseconds, written idiomatically
	// below.
	globalTimeout := time.Duration(math.MaxInt64)
	if GracePeriodSeconds != 0 {
		globalTimeout = time.Duration(GracePeriodSeconds) * time.Second
	}
	doneCount := 0
	for {
		select {
		case err := <-errCh:
			return err
		case <-doneCh:
			doneCount++
			if doneCount == len(pods) {
				return nil
			}
		case <-time.After(globalTimeout):
			return fmt.Errorf("Drain did not complete within %v, please check node status in a few minutes", globalTimeout)
		}
	}
}
// waitForDelete polls with the given interval until every pod in pods is
// gone — deleted, or replaced by a new pod with the same name but a
// different UID — and returns the pods still pending when polling stops.
func waitForDelete(pods []v1.Pod, interval, timeout time.Duration, getPodFn func(string, string) (*v1.Pod, error)) ([]v1.Pod, error) {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		var pending []v1.Pod
		for i := range pods {
			p, err := getPodFn(pods[i].Namespace, pods[i].Name)
			switch {
			case apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pods[i].ObjectMeta.UID):
				// Deleted, or recreated under the same name: done.
				continue
			case err != nil:
				return false, err
			default:
				pending = append(pending, pods[i])
			}
		}
		pods = pending
		return len(pending) == 0, nil
	})
	return pods, err
}
// power returns x**n for a non-negative integer exponent n using repeated
// multiplication. For n <= 0 it returns 1 (the original decremented n until
// zero and therefore looped forever on a negative exponent).
func power(x int64, n int) int64 {
	var res int64 = 1
	for i := 0; i < n; i++ {
		res *= x
	}
	return res
}

242
pkg/models/metrics/pods.go Normal file
View File

@@ -0,0 +1,242 @@
package metrics
import (
"encoding/json"
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1"
coreV1 "k8s.io/api/core/v1"
"kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
)
// PodMetrics aggregates a pod's resource requests/limits together with its
// cpu and memory usage time series, as assembled by FormatPodMetrics.
type PodMetrics struct {
	PodName   string `json:"pod_name"`
	NameSpace string `json:"namespace"`
	NodeName  string `json:"node_name"`
	// Request/limit values are decimal strings; the sentinel "inf" (const
	// Inf) marks a value that is not set or could not be read from heapster.
	CPURequest    string `json:"cpu_request"`
	CPULimit      string `json:"cpu_limit"`
	MemoryRequest string `json:"mem_request"`
	MemoryLimit   string `json:"mem_limit"`
	// Time-ordered usage samples as returned by heapster.
	CPU    []PodCpuMetrics    `json:"cpu"`
	Memory []PodMemoryMetrics `json:"memory"`
}
// PodCpuMetrics is a single sample of a pod's cpu usage time series.
type PodCpuMetrics struct {
	TimeStamp string `json:"timestamp"`
	UsedCpu   string `json:"used_cpu"`
}

// PodMemoryMetrics is a single sample of a pod's memory usage time series;
// UsedMemory is formatted in MiB (see FormatPodMetrics).
type PodMemoryMetrics struct {
	TimeStamp  string `json:"timestamp"`
	UsedMemory string `json:"used_mem"`
}

// Inf is the sentinel value reported when a metric is unset or unavailable.
const Inf = "inf"
// GetNameSpaces returns the names of all namespaces known to heapster in
// the default cluster. Decode failures are logged and yield a nil slice.
func GetNameSpaces() []string {
	raw := client.GetHeapsterMetrics("/namespaces")
	var namespaces []string
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&namespaces); err != nil {
		glog.Error(err)
	}
	return namespaces
}
// GetPods returns the names of all pods under the given namespace, as known
// to heapster in the default cluster. Decode failures are logged and yield
// a nil slice.
func GetPods(namespace string) []string {
	raw := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods")
	var pods []string
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&pods); err != nil {
		glog.Error(err)
	}
	return pods
}
// GetSinglePodMetrics formats the metrics for one pod and sends the result
// on ch; intended to run as a goroutine, fanned out by GetPodsMetrics.
func GetSinglePodMetrics(namespace string, podName string, ch chan PodMetrics) {
	podMetrics := FormatPodMetrics(namespace, podName)
	ch <- podMetrics
}
// GetPodsMetrics gathers metrics for every pod in podList concurrently and
// returns them in arbitrary (channel-receive) order. Returns nil for an
// empty pod list.
func GetPodsMetrics(podList *coreV1.PodList) []PodMetrics {
	ch := make(chan PodMetrics)
	for _, pod := range podList.Items {
		go GetSinglePodMetrics(pod.Namespace, pod.Name, ch)
	}
	// Collect exactly one result per launched goroutine; `for range`
	// replaces the original non-idiomatic `for _, _ = range`.
	var items []PodMetrics
	for range podList.Items {
		items = append(items, <-ch)
	}
	return items
}
// GetPodMetricsInNamespace returns metrics for every pod in the namespace.
// Each pod's metrics are appended as an individual item so that TotalCount
// equals the number of pods — the original appended the whole slice as one
// element, which made TotalCount always 1. List errors are logged and yield
// an empty response (the original ignored the error and would panic on a
// nil list).
func GetPodMetricsInNamespace(namespace string) constants.PageableResponse {
	var podMetrics constants.PageableResponse
	k8sClient := client.NewK8sClient()
	podList, err := k8sClient.CoreV1().Pods(namespace).List(v1.ListOptions{})
	if err != nil {
		glog.Error(err)
		return podMetrics
	}
	for _, m := range GetPodsMetrics(podList) {
		podMetrics.Items = append(podMetrics.Items, m)
	}
	podMetrics.TotalCount = len(podMetrics.Items)
	return podMetrics
}
// GetPodMetricsInNode returns metrics for every pod scheduled on the node
// (across all namespaces). Pods are appended individually so TotalCount
// equals the pod count (the original appended the whole slice as one item,
// making TotalCount always 1); List errors are logged instead of ignored.
func GetPodMetricsInNode(nodeName string) constants.PageableResponse {
	var podMetrics constants.PageableResponse
	k8sClient := client.NewK8sClient()
	podList, err := k8sClient.CoreV1().Pods("").List(v1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodeName,
	})
	if err != nil {
		glog.Error(err)
		return podMetrics
	}
	for _, m := range GetPodsMetrics(podList) {
		podMetrics.Items = append(podMetrics.Items, m)
	}
	podMetrics.TotalCount = len(podMetrics.Items)
	return podMetrics
}
// GetPodMetricsInNamespaceOfNode returns metrics for every pod that is both
// in the given namespace and scheduled on the given node. Pods are appended
// individually so TotalCount equals the pod count (the original appended
// the whole slice as one item, making TotalCount always 1); List errors are
// logged instead of ignored.
func GetPodMetricsInNamespaceOfNode(namespace string, nodeName string) constants.PageableResponse {
	var podMetrics constants.PageableResponse
	k8sClient := client.NewK8sClient()
	podList, err := k8sClient.CoreV1().Pods("").List(v1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodeName + ",metadata.namespace=" + namespace,
	})
	if err != nil {
		glog.Error(err)
		return podMetrics
	}
	for _, m := range GetPodsMetrics(podList) {
		podMetrics.Items = append(podMetrics.Items, m)
	}
	podMetrics.TotalCount = len(podMetrics.Items)
	return podMetrics
}
// GetAllPodMetrics returns metrics for every pod in the cluster. Pods are
// appended individually so TotalCount equals the pod count (the original
// appended the whole slice as one item, making TotalCount always 1); List
// errors are logged instead of ignored.
func GetAllPodMetrics() constants.PageableResponse {
	var podMetrics constants.PageableResponse
	k8sClient := client.NewK8sClient()
	podList, err := k8sClient.CoreV1().Pods("").List(v1.ListOptions{})
	if err != nil {
		glog.Error(err)
		return podMetrics
	}
	for _, m := range GetPodsMetrics(podList) {
		podMetrics.Items = append(podMetrics.Items, m)
	}
	podMetrics.TotalCount = len(podMetrics.Items)
	return podMetrics
}
// FormatPodMetrics collects the cpu/memory request and limit values plus
// usage time series for one pod from heapster and assembles a PodMetrics.
// Missing or unreadable values are reported as Inf.
func FormatPodMetrics(namespace string, pod string) PodMetrics {
	var resultPod PodMetrics
	resultPod.PodName = pod
	resultPod.NameSpace = namespace

	prefix := "/namespaces/" + namespace + "/pods/" + pod + "/metrics/"

	// The original inlined four near-identical fetch blocks with divergent
	// error handling (some errors unchecked, one path leaving CPURequest
	// empty); podMetricValue applies one consistent policy.
	resultPod.CPURequest = podMetricValue(prefix + "cpu/request")
	resultPod.CPULimit = podMetricValue(prefix + "cpu/limit")
	resultPod.MemoryRequest = podMetricValue(prefix + "memory/request")
	resultPod.MemoryLimit = podMetricValue(prefix + "memory/limit")

	// CPU usage time series.
	cpuUsageRateMetrics, err := client.GetHeapsterMetricsJson(prefix + "cpu/usage_rate").GetObjectArray("metrics")
	if err != nil {
		glog.Error(err)
	}
	var podCPUMetrics []PodCpuMetrics
	for _, m := range cpuUsageRateMetrics {
		timestamp, _ := m.GetString("timestamp")
		value, _ := m.GetFloat64("value")
		podCPUMetrics = append(podCPUMetrics, PodCpuMetrics{
			TimeStamp: timestamp,
			UsedCpu:   fmt.Sprintf("%.1f", value),
		})
	}
	resultPod.CPU = podCPUMetrics

	// Memory usage time series; values arrive in bytes and are reported in MiB.
	memoryUsageMetrics, err := client.GetHeapsterMetricsJson(prefix + "memory/usage").GetObjectArray("metrics")
	if err != nil {
		glog.Error(err)
	}
	var podMemMetrics []PodMemoryMetrics
	for _, m := range memoryUsageMetrics {
		timestamp, _ := m.GetString("timestamp")
		sample := PodMemoryMetrics{TimeStamp: timestamp, UsedMemory: Inf}
		if usedBytes, err := m.GetFloat64("value"); err == nil {
			sample.UsedMemory = fmt.Sprintf("%.1f", usedBytes/1024/1024)
		}
		podMemMetrics = append(podMemMetrics, sample)
	}
	resultPod.Memory = podMemMetrics

	return resultPod
}

// podMetricValue fetches a single-valued heapster metric and returns its
// first sample as a string, or Inf when the metric is absent or unreadable.
func podMetricValue(path string) string {
	metrics, err := client.GetHeapsterMetricsJson(path).GetObjectArray("metrics")
	if err != nil {
		glog.Error(err)
		return Inf
	}
	if len(metrics) == 0 {
		return Inf
	}
	data, err := metrics[0].GetNumber("value")
	if err != nil {
		glog.Error(err)
		return Inf
	}
	return data.String()
}
// GetNodeNameForPod returns the name of the node the pod is scheduled on,
// or the empty string when the pod cannot be fetched.
func GetNodeNameForPod(podName, namespace string) string {
	cli := client.NewK8sClient()
	pod, err := cli.CoreV1().Pods(namespace).Get(podName, v1.GetOptions{})
	if err != nil {
		glog.Error(err)
		return ""
	}
	return pod.Spec.NodeName
}

View File

@@ -1,309 +0,0 @@
/*
Copyright 2018 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/golang/glog"
v1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
"kubesphere.io/kubesphere/pkg/client"
kubeclient "kubesphere.io/kubesphere/pkg/client"
"kubesphere.io/kubesphere/pkg/constants"
ksutil "kubesphere.io/kubesphere/pkg/util"
)
const (
//status: "False"
OutOfDisk = "OutOfDisk"
//status: "False"
MemoryPressure = "MemoryPressure"
//status: "False"
DiskPressure = "DiskPressure"
//status: "False"
PIDPressure = "PIDPressure"
//status: "True"
KubeletReady = "Ready"
)
type ResultNode struct {
NodeName string `json:"node_name"`
NodeStatus string `json:"node_status"`
PodsCount string `json:"pods_count"`
PodsCapacity string `json:"pods_capacity"`
UsedFS string `json:"used_fs"`
TotalFS string `json:"total_fs"`
FSUtilization string `json:"fs_utilization"`
CPU []CPUNode `json:"cpu"`
Memory []MemoryNode `json:"memory"`
}
type CPUNode struct {
TimeStamp string `json:"timestamp"`
UsedCPU string `json:"used_cpu"`
TotalCPU string `json:"total_cpu"`
CPUUtilization string `json:"cpu_utilization"`
}
type MemoryNode struct {
TimeStamp string `json:"timestamp"`
UsedMemory string `json:"used_mem"`
TotalMemory string `json:"total_mem"`
MemoryUtilization string `json:"mem_utilization"`
}
/*
Get all nodes in default cluster
*/
func GetNodes() []string {
nodesList := client.GetHeapsterMetrics("/nodes")
var nodes []string
dec := json.NewDecoder(strings.NewReader(nodesList))
err := dec.Decode(&nodes)
if err != nil {
glog.Error(err)
}
return nodes
}
/*
Format cpu/memory data for specified node
*/
func FormatNodeMetrics(nodeName string) ResultNode {
var resultNode ResultNode
var nodeCPUMetrics []CPUNode
var nodeMemMetrics []MemoryNode
var cpuMetrics CPUNode
var memMetrics MemoryNode
var total_cpu float64
var total_mem float64
cpuNodeAllocated := client.GetHeapsterMetrics("/nodes/" + nodeName + "/metrics/cpu/node_allocatable")
if cpuNodeAllocated != "" {
var err error
total_cpu, err = strconv.ParseFloat(ksutil.JsonRawMessage(cpuNodeAllocated).Find("metrics").ToList()[0].Find("value").ToString(), 64)
if err == nil {
total_cpu = total_cpu / 1000
}
}
cpuUsageRate := client.GetHeapsterMetrics("/nodes/" + nodeName + "/metrics/cpu/usage_rate")
if cpuUsageRate != "" {
metrics := ksutil.JsonRawMessage(cpuUsageRate).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
cpu_utilization, _ := strconv.ParseFloat(ConvertCPUUsageRate(metric.Find("value").ToString()), 64)
cpuMetrics.TimeStamp = timestamp.ToString()
cpuMetrics.TotalCPU = fmt.Sprintf("%.1f", total_cpu)
cpuMetrics.CPUUtilization = fmt.Sprintf("%.3f", cpu_utilization)
cpuMetrics.UsedCPU = fmt.Sprintf("%.1f", total_cpu*cpu_utilization)
glog.Info("node " + nodeName + " has total cpu " + fmt.Sprintf("%.1f", total_cpu) + " CPU utilization " + fmt.Sprintf("%.3f", cpu_utilization) + " at time" + timestamp.ToString())
nodeCPUMetrics = append(nodeCPUMetrics, cpuMetrics)
}
}
memNodeAllocated := client.GetHeapsterMetrics("/nodes/" + nodeName + "/metrics/memory/node_allocatable")
var total_mem_bytes, used_mem_bytes float64
if memNodeAllocated != "" {
var err error
total_mem_bytes, err = strconv.ParseFloat(ksutil.JsonRawMessage(memNodeAllocated).Find("metrics").ToList()[0].Find("value").ToString(), 64)
if err == nil {
total_mem = total_mem_bytes / 1024 / 1024 / 1024
}
}
memUsage := client.GetHeapsterMetrics("/nodes/" + nodeName + "/metrics/memory/usage")
if memUsage != "" {
metrics := ksutil.JsonRawMessage(memUsage).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
used_mem_bytes, _ = strconv.ParseFloat(metric.Find("value").ToString(), 64)
used_mem := used_mem_bytes / 1024 / 1024 / 1024
memMetrics.TimeStamp = timestamp.ToString()
memMetrics.TotalMemory = fmt.Sprintf("%.1f", total_mem)
memMetrics.UsedMemory = fmt.Sprintf("%.1f", used_mem)
memMetrics.MemoryUtilization = fmt.Sprintf("%.3f", used_mem_bytes/total_mem_bytes)
glog.Info("node " + nodeName + " has total mem " + fmt.Sprintf("%.1f", total_mem) + " mem utilization " + fmt.Sprintf("%.3f", used_mem_bytes/total_mem_bytes) + " at time" + timestamp.ToString())
nodeMemMetrics = append(nodeMemMetrics, memMetrics)
}
}
resultNode.NodeName = nodeName
resultNode.PodsCount = strconv.Itoa(len(GetPodsForNode(nodeName, "")))
nodeResObj := getNodeResObj(nodeName)
resultNode.PodsCapacity = nodeResObj.Status.Capacity.Pods().String()
resultNode.NodeStatus = getNodeStatus(nodeResObj)
resultNode.UsedFS, resultNode.TotalFS, resultNode.FSUtilization = getNodeFileSystemStatus(nodeResObj)
resultNode.CPU = nodeCPUMetrics
resultNode.Memory = nodeMemMetrics
return resultNode
}
func getNodeResObj(nodeName string) *v1.Node {
cli := client.NewK8sClient()
node, err := cli.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
glog.Error(err)
} else {
return node
}
return nil
}
func getNodeStatus(node *v1.Node) string {
status := "Ready"
conditions := node.Status.Conditions
for _, cond := range conditions {
if cond.Type == DiskPressure && cond.Status == "True" {
status = "NotReady"
break
}
if cond.Type == OutOfDisk && cond.Status == "True" {
status = "NotReady"
break
}
if cond.Type == MemoryPressure && cond.Status == "True" {
status = "NotReady"
}
if cond.Type == PIDPressure && cond.Status == "True" {
status = "NotReady"
break
}
if cond.Type == KubeletReady && cond.Status == "False" {
status = "NotReady"
break
}
}
return status
}
func getNodeFileSystemStatus(node *v1.Node) (string, string, string) {
nodeMetricsAsStr := client.GetCAdvisorMetrics(node.Annotations["alpha.kubernetes.io/provided-node-ip"])
if nodeMetricsAsStr != "" {
usedBytesAsStr, _ := strconv.ParseFloat(ksutil.JsonRawMessage(nodeMetricsAsStr).Find("node").Find("fs").Find("usedBytes").ToString(), 64)
capacityBytesAsStr, _ := strconv.ParseFloat(ksutil.JsonRawMessage(nodeMetricsAsStr).Find("node").Find("fs").Find("capacityBytes").ToString(), 64)
return fmt.Sprintf("%.1f", usedBytesAsStr/1024/1024/1024), fmt.Sprintf("%.1f", capacityBytesAsStr/1024/1024/1024), fmt.Sprintf("%.3f", usedBytesAsStr/capacityBytesAsStr)
}
return "", "", ""
}
func DrainNode(nodename string) (msg constants.MessageResponse, err error) {
k8sclient := kubeclient.NewK8sClient()
var options metav1.ListOptions
pods := make([]v1.Pod, 0)
options.FieldSelector = "spec.nodeName=" + nodename
podList, err := k8sclient.CoreV1().Pods("").List(options)
if err != nil {
glog.Fatal(err)
return msg, err
}
options.FieldSelector = ""
daemonsetList, err := k8sclient.AppsV1beta2().DaemonSets("").List(options)
if err != nil {
glog.Fatal(err)
return msg, err
}
if len(podList.Items) > 0 {
for _, pod := range podList.Items {
if !containDaemonset(pod, *daemonsetList) {
pods = append(pods, pod)
}
}
}
//create eviction
var eviction policy.Eviction
eviction.Kind = "Eviction"
eviction.APIVersion = "policy/v1beta1"
if len(pods) > 0 {
for _, pod := range pods {
eviction.Namespace = pod.Namespace
eviction.Name = pod.Name
err := k8sclient.CoreV1().Pods(pod.Namespace).Evict(&eviction)
if err != nil {
return msg, err
}
}
}
data := []byte(" {\"spec\":{\"unschedulable\":true}}")
nodestatus, err := k8sclient.CoreV1().Nodes().Patch(nodename, types.StrategicMergePatchType, data)
if err != nil {
glog.Fatal(err)
return msg, err
}
if nodestatus.Spec.Unschedulable {
msg.Message = fmt.Sprintf("success")
} else {
glog.Fatal(err)
return msg, err
}
return msg, nil
}
func containDaemonset(pod v1.Pod, daemonsetList v1beta2.DaemonSetList) bool {
flag := false
for _, daemonset := range daemonsetList.Items {
if strings.Contains(pod.Name, daemonset.Name) {
flag = true
}
}
return flag
}

View File

@@ -1,283 +0,0 @@
package models
import (
"encoding/json"
"strings"
"github.com/golang/glog"
"kubesphere.io/kubesphere/pkg/client"
ksutil "kubesphere.io/kubesphere/pkg/util"
"fmt"
"strconv"
"math"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"kubesphere.io/kubesphere/pkg/constants"
)
type ResultPod struct {
PodName string `json:"pod_name"`
NameSpace string `json:"namespace"`
NodeName string `json:"node_name"`
CPURequest string `json:"cpu_request"`
CPULimit string `json:"cpu_limit"`
MemoryRequest string `json:"mem_request"`
MemoryLimit string `json:"mem_limit"`
CPU []CPUPod `json:"cpu"`
Memory []MemoryPod `json:"memory"`
}
type CPUPod struct {
TimeStamp string `json:"timestamp"`
UsedCPU string `json:"used_cpu"`
CPUUtilization string `json:"cpu_utilization"`
}
type MemoryPod struct {
TimeStamp string `json:"timestamp"`
UsedMemory string `json:"used_mem"`
MemoryUtilization string `json:"mem_utilization"`
}
/*
Get all namespaces in default cluster
*/
func GetNameSpaces() []string {
namespacesList := client.GetHeapsterMetrics("/namespaces")
var namespaces []string
dec := json.NewDecoder(strings.NewReader(namespacesList))
err := dec.Decode(&namespaces)
if err != nil {
glog.Error(err)
}
return namespaces
}
/*
Get all pods under specified namespace in default cluster
*/
func GetPods(namespace string) []string {
podsList := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods")
var pods []string
dec := json.NewDecoder(strings.NewReader(podsList))
err := dec.Decode(&pods)
if err != nil {
glog.Error(err)
}
return pods
}
func GetPodsForNode(nodeName, namespace string) []string {
var pods []string
cli := client.NewK8sClient()
podList, err := cli.CoreV1().Pods(namespace).List(v1.ListOptions{FieldSelector: "spec.nodeName=" + nodeName})
if err != nil {
glog.Error(err)
} else {
for _, pod := range podList.Items {
pods = append(pods, pod.Name)
}
}
return pods
}
func FormatPodsMetrics(nodeName, namespace string) constants.PageableResponse {
var result constants.PageableResponse
var resultPod ResultPod
var pods []string
if nodeName == "" {
pods = GetPods(namespace)
} else {
pods = GetPodsForNode(nodeName, namespace)
}
var total_count int
for i, pod := range pods {
resultPod = FormatPodMetrics(namespace, pod)
if nodeName != "" {
resultPod.NodeName = nodeName
} else {
resultPod.NodeName = GetNodeNameForPod(pod, namespace)
}
result.Items = append(result.Items, resultPod)
total_count = i
}
result.TotalCount = total_count + 1
return result
}
func FormatPodMetrics(namespace, pod string) ResultPod {
var resultPod ResultPod
var podCPUMetrics []CPUPod
var podMemMetrics []MemoryPod
var cpuMetrics CPUPod
var memMetrics MemoryPod
resultPod.PodName = pod
resultPod.NameSpace = namespace
cpuRequest := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/cpu/request")
cpuRequest = ksutil.JsonRawMessage(cpuRequest).Find("metrics").ToList()[0].Find("value").ToString()
if cpuRequest != "" && cpuRequest != "0" {
resultPod.CPURequest = cpuRequest
} else {
resultPod.CPURequest = "inf"
}
cpuLimit := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/cpu/limit")
cpuLimit = ksutil.JsonRawMessage(cpuLimit).Find("metrics").ToList()[0].Find("value").ToString()
if cpuLimit != "" && cpuLimit != "0" {
resultPod.CPULimit = cpuLimit
} else {
resultPod.CPULimit = "inf"
}
memoryRequest := ksutil.JsonRawMessage(client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/memory/request")).Find("metrics").ToList()[0].Find("value").ToString()
resultPod.MemoryRequest = ConvertMemory(memoryRequest)
memoryLimit := ksutil.JsonRawMessage(client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/memory/limit")).Find("metrics").ToList()[0].Find("value").ToString()
resultPod.MemoryLimit = ConvertMemory(memoryLimit)
cpuUsageRate := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/cpu/usage_rate")
if cpuUsageRate != "" {
metrics := ksutil.JsonRawMessage(cpuUsageRate).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
cpu_utilization, _ := strconv.ParseFloat(ConvertCPUUsageRate(metric.Find("value").ToString()), 64)
cpuMetrics.TimeStamp = timestamp.ToString()
cpuMetrics.CPUUtilization = fmt.Sprintf("%.3f", cpu_utilization)
if resultPod.CPULimit != "inf" {
cpu_limit, _ := strconv.ParseFloat(resultPod.CPULimit, 64)
cpuMetrics.UsedCPU = fmt.Sprintf("%.1f", cpu_limit*cpu_utilization)
} else {
cpuMetrics.UsedCPU = "inf"
}
glog.Info("pod " + pod + " has limit cpu " + resultPod.CPULimit + " CPU utilization " + fmt.Sprintf("%.3f", cpu_utilization) + " at time" + timestamp.ToString())
podCPUMetrics = append(podCPUMetrics, cpuMetrics)
}
}
resultPod.CPU = podCPUMetrics
var used_mem_bytes float64
memUsage := client.GetHeapsterMetrics("/namespaces/" + namespace + "/pods/" + pod + "/metrics/memory/usage")
if memUsage != "" {
metrics := ksutil.JsonRawMessage(memUsage).Find("metrics").ToList()
for _, metric := range metrics {
timestamp := metric.Find("timestamp")
used_mem_bytes, _ = strconv.ParseFloat(metric.Find("value").ToString(), 64)
used_mem := used_mem_bytes / 1024 / 1024
memMetrics.TimeStamp = timestamp.ToString()
memMetrics.UsedMemory = fmt.Sprintf("%.1f", used_mem)
memMetrics.MemoryUtilization = fmt.Sprintf("%.3f", CalculateMemoryUsage(memoryRequest, memoryLimit, metric.Find("value").ToString()))
glog.Info("pod " + pod + " has limit mem " + resultPod.MemoryLimit + " mem utilization " + memMetrics.MemoryUtilization + " at time" + timestamp.ToString())
podMemMetrics = append(podMemMetrics, memMetrics)
}
}
resultPod.Memory = podMemMetrics
return resultPod
}
func ConvertMemory(memBytes string) string {
var mem string
if memBytes != "" {
if memBytes != "" && memBytes != "0" {
memBytes, error := strconv.ParseFloat(memBytes, 64)
if error == nil {
mem = fmt.Sprintf("%.3f", memBytes/1024/1024)
} else {
mem = "inf"
}
} else {
mem = "inf"
}
} else {
mem = "inf"
}
return mem
}
func CalculateMemoryUsage(requestMem, limitMem, usedMem string) float64 {
var requestMemInBytes, limitMemInBytes, usedMemInBytes, memUsage float64
if requestMem != "" && requestMem != "0" && requestMem != "inf" {
requestMemInBytes, _ = strconv.ParseFloat(requestMem, 64)
} else {
glog.Info("memory request is not set")
requestMemInBytes = 0
}
if limitMem != "" && limitMem != "0" && limitMem != "inf" {
limitMemInBytes, _ = strconv.ParseFloat(limitMem, 64)
} else {
glog.Info("memory limit is not set")
limitMemInBytes = 0
}
if usedMem != "" && usedMem != "0" {
usedMemInBytes, _ = strconv.ParseFloat(usedMem, 64)
} else {
usedMemInBytes = 0
}
if usedMemInBytes > 0 {
if requestMemInBytes > 0 && limitMemInBytes > 0 {
if usedMemInBytes > requestMemInBytes {
glog.Info("used memory is higher than memory request")
memUsage = usedMemInBytes / limitMemInBytes
} else {
memUsage = usedMemInBytes / requestMemInBytes
}
} else if requestMemInBytes > 0 && limitMemInBytes == 0 {
if usedMemInBytes > requestMemInBytes {
glog.Info("used memory is higher than memory request")
memUsage = 0
} else {
memUsage = usedMemInBytes / requestMemInBytes
}
} else if requestMemInBytes == 0 && limitMemInBytes > 0 {
if usedMemInBytes <= limitMemInBytes {
memUsage = usedMemInBytes / limitMemInBytes
}
} else {
memUsage = 0
}
} else {
memUsage = 0
}
return memUsage
}
func ConvertCPUUsageRate(cpuUsageRate string) string {
if cpuUsageRate != "" && cpuUsageRate != "0" {
rate, _ := strconv.ParseFloat(cpuUsageRate, 64)
rateBase := math.Pow10(strings.Count(cpuUsageRate, "") - 1)
return fmt.Sprintf("%.3f", rate/rateBase)
} else {
return "0"
}
}
func GetNodeNameForPod(podName, namespace string) string {
var nodeName string
cli := client.NewK8sClient()
pod, err := cli.CoreV1().Pods(namespace).Get(podName, v1.GetOptions{})
if err != nil {
glog.Error(err)
} else {
nodeName = pod.Spec.NodeName
}
return nodeName
}

View File

@@ -18,12 +18,10 @@ package models
import (
"io/ioutil"
"strings"
"github.com/golang/glog"
coreV1 "k8s.io/api/core/v1"
extensionsV1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/api/rbac/v1beta1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
@@ -31,40 +29,22 @@ import (
"kubesphere.io/kubesphere/pkg/constants"
)
func GetAllRouters() ([]*coreV1.Service, error) {
func GetAllRouters() ([]coreV1.Service, error) {
k8sClient := client.NewK8sClient()
routers := make([]*coreV1.Service, 0)
opts := metaV1.ListOptions{
LabelSelector: "app=kubesphere,component=kubesphere-router-gateway",
}
opts := metaV1.ListOptions{}
namespaces, err := k8sClient.CoreV1().Namespaces().List(opts)
services, err := k8sClient.CoreV1().Services(constants.IngressControllerNamespace).List(opts)
if err != nil {
glog.Error(err)
return routers, err
return nil, err
}
opts = metaV1.ListOptions{
LabelSelector: "app=kubesphere,component=kubesphere-router",
FieldSelector: "metadata.name=kubesphere-router-gateway",
}
for _, namespace := range namespaces.Items {
services, err := k8sClient.CoreV1().Services(namespace.Name).List(opts)
if err != nil {
glog.Error(err)
return nil, err
}
if len(services.Items) > 0 {
routers = append(routers, &services.Items[0])
}
}
return routers, nil
return services.Items, nil
}
// Get router from a namespace
@@ -73,12 +53,14 @@ func GetRouter(namespace string) (*coreV1.Service, error) {
var router *coreV1.Service
serviceName := constants.IngressControllerPrefix + namespace
opts := metaV1.ListOptions{
LabelSelector: "app=kubesphere,component=kubesphere-router",
FieldSelector: "metadata.name=kubesphere-router-gateway",
LabelSelector: "app=kubesphere,component=kubesphere-router-gateway",
FieldSelector: "metadata.name=" + serviceName,
}
services, err := k8sClient.CoreV1().Services(namespace).List(opts)
services, err := k8sClient.CoreV1().Services(constants.IngressControllerNamespace).List(opts)
if err != nil {
glog.Error(err)
@@ -97,14 +79,14 @@ func LoadYamls() ([]string, error) {
var yamls []string
files, err := ioutil.ReadDir(constants.INGRESS_CONTROLLER_FOLDER)
files, err := ioutil.ReadDir(constants.IngressControllerFolder)
if err != nil {
glog.Error(err)
return nil, err
}
for _, file := range files {
content, err := ioutil.ReadFile(constants.INGRESS_CONTROLLER_FOLDER + "/" + file.Name())
content, err := ioutil.ReadFile(constants.IngressControllerFolder + "/" + file.Name())
if err != nil {
glog.Error(err)
@@ -117,13 +99,6 @@ func LoadYamls() ([]string, error) {
return yamls, nil
}
func IsRouterService(serviceName string) bool {
if strings.Compare(strings.ToLower(serviceName), "default-http-backend") == 0 {
return false
}
return true
}
// Create a ingress controller in a namespace
func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations map[string]string) (*coreV1.Service, error) {
@@ -147,62 +122,27 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m
}
switch obj.(type) {
case *v1beta1.Role:
role := obj.(*v1beta1.Role)
role, err := k8sClient.RbacV1beta1().Roles(namespace).Create(role)
if err != nil {
glog.Error(err)
}
case *v1beta1.ClusterRole:
clusterRole := obj.(*v1beta1.ClusterRole)
clusterRole, err := k8sClient.RbacV1beta1().ClusterRoles().Create(clusterRole)
if err != nil {
glog.Error(err)
}
case *v1beta1.ClusterRoleBinding:
clusterRoleBinding := obj.(*v1beta1.ClusterRoleBinding)
clusterRoleBinding.Subjects[0].Namespace = namespace
clusterRoleBinding, err := k8sClient.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding)
if err != nil {
glog.Error(err)
}
case *v1beta1.RoleBinding:
roleBinding := obj.(*v1beta1.RoleBinding)
roleBinding.Subjects[0].Namespace = namespace
roleBinding, err := k8sClient.RbacV1beta1().RoleBindings(namespace).Create(roleBinding)
if err != nil {
glog.Error(err)
}
case *coreV1.ServiceAccount:
sa := obj.(*coreV1.ServiceAccount)
sa, err := k8sClient.CoreV1().ServiceAccounts(namespace).Create(sa)
if err != nil {
glog.Error(err)
}
case *coreV1.Service:
service := obj.(*coreV1.Service)
if IsRouterService(service.Name) {
service.SetAnnotations(annotations)
service.Spec.Type = routerType
}
service.SetAnnotations(annotations)
service.Spec.Type = routerType
service.Name = constants.IngressControllerPrefix + namespace
service, err := k8sClient.CoreV1().Services(namespace).Create(service)
service, err := k8sClient.CoreV1().Services(constants.IngressControllerNamespace).Create(service)
if err != nil {
glog.Error(err)
return nil, err
}
if IsRouterService(service.Name) {
router = service
}
router = service
case *extensionsV1beta1.Deployment:
deployment := obj.(*extensionsV1beta1.Deployment)
deployment, err := k8sClient.ExtensionsV1beta1().Deployments(namespace).Create(deployment)
deployment.Name = constants.IngressControllerPrefix + namespace
deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--watch-namespace="+namespace)
glog.Info(deployment.Spec.Template.Spec.Containers[0].Args)
deployment, err := k8sClient.ExtensionsV1beta1().Deployments(constants.IngressControllerNamespace).Create(deployment)
if err != nil {
glog.Error(err)
}
@@ -219,74 +159,55 @@ func CreateRouter(namespace string, routerType coreV1.ServiceType, annotations m
func DeleteRouter(namespace string) (*coreV1.Service, error) {
k8sClient := client.NewK8sClient()
var err error
var router *coreV1.Service
yamls, err := LoadYamls()
if err != nil {
glog.Error(err)
}
for _, f := range yamls {
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode([]byte(f), nil, nil)
// delete controller service
serviceName := constants.IngressControllerPrefix + namespace
deleteOptions := metaV1.DeleteOptions{}
listOptions := metaV1.ListOptions{
LabelSelector: "app=kubesphere,component=kubesphere-router-gateway",
FieldSelector: "metadata.name=" + serviceName}
serviceList, err := k8sClient.CoreV1().Services(constants.IngressControllerNamespace).List(listOptions)
if err != nil {
glog.Error(err)
}
if len(serviceList.Items) > 0 {
router = &serviceList.Items[0]
err = k8sClient.CoreV1().Services(constants.IngressControllerNamespace).Delete(serviceName, &deleteOptions)
if err != nil {
glog.Error(err)
}
}
// delete controller deployment
deploymentName := constants.IngressControllerPrefix + namespace
listOptions = metaV1.ListOptions{
FieldSelector: "metadata.name=" + deploymentName,
}
deployments, err := k8sClient.ExtensionsV1beta1().Deployments(constants.IngressControllerNamespace).List(listOptions)
if err != nil {
glog.Error(err)
}
if len(deployments.Items) > 0 {
err = k8sClient.ExtensionsV1beta1().Deployments(constants.IngressControllerNamespace).Delete(deploymentName, &deleteOptions)
if err != nil {
glog.Error(err)
return router, err
}
options := metaV1.DeleteOptions{}
switch obj.(type) {
case *v1beta1.Role:
role := obj.(*v1beta1.Role)
err = k8sClient.RbacV1beta1().Roles(namespace).Delete(role.Name, &options)
if err != nil {
glog.Error(err)
}
case *v1beta1.ClusterRoleBinding:
clusterRoleBinding := obj.(*v1beta1.ClusterRoleBinding)
err = k8sClient.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBinding.Name, &options)
if err != nil {
glog.Error(err)
}
case *v1beta1.RoleBinding:
roleBinding := obj.(*v1beta1.RoleBinding)
err = k8sClient.RbacV1beta1().RoleBindings(namespace).Delete(roleBinding.Name, &options)
if err != nil {
glog.Error(err)
}
case *coreV1.ServiceAccount:
sa := obj.(*coreV1.ServiceAccount)
err = k8sClient.CoreV1().ServiceAccounts(namespace).Delete(sa.Name, &options)
if err != nil {
glog.Error(err)
}
case *coreV1.Service:
service := obj.(*coreV1.Service)
err = k8sClient.CoreV1().Services(namespace).Delete(service.Name, &options)
if err != nil {
glog.Error(err)
}
if IsRouterService(service.Name) {
router = service
}
case *extensionsV1beta1.Deployment:
deployment := obj.(*extensionsV1beta1.Deployment)
err = k8sClient.ExtensionsV1beta1().Deployments(namespace).Delete(deployment.Name, &options)
if err != nil {
glog.Error(err)
}
default:
//glog.Info("Default resource")
}
}
return router, nil
}
// Update Ingress Controller Service, change type from NodePort to Loadbalancer or vice versa.
@@ -305,7 +226,7 @@ func UpdateRouter(namespace string, routerType coreV1.ServiceType, annotations m
router.Spec.Type = routerType
router.SetAnnotations(annotations)
router, err = k8sClient.CoreV1().Services(namespace).Update(router)
router, err = k8sClient.CoreV1().Services(constants.IngressControllerNamespace).Update(router)
if err != nil {
glog.Error(err)

View File

@@ -47,11 +47,12 @@ func GetPvcListBySc(storageclass string) (res []v12.PersistentVolumeClaim, err e
// Select persistent volume claims which
// storage class name is equal to the specific storage class.
for _, claim := range claimList.Items {
if claim.Spec.StorageClassName != nil &&
*claim.Spec.StorageClassName == storageclass {
if claim.Spec.StorageClassName != nil {
if *claim.Spec.StorageClassName == storageclass {
res = append(res, claim)
}
} else if claim.GetAnnotations()[v12.BetaStorageClassAnnotation] == storageclass {
res = append(res, claim)
} else {
continue
}
}
return res, nil