add feature: logging and monitoring
Signed-off-by: huanggze <loganhuang@yunify.com>
This commit is contained in:
638
pkg/simple/client/elasticsearch/esclient.go
Normal file
638
pkg/simple/client/elasticsearch/esclient.go
Normal file
@@ -0,0 +1,638 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package esclient
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// jsonIter is a drop-in replacement for encoding/json, used for the
// performance-sensitive unmarshalling of Elasticsearch responses.
var jsonIter = jsoniter.ConfigCompatibleWithStandardLibrary

var (
	// mu guards esConfigs against concurrent readers and writers.
	mu sync.RWMutex
	// esConfigs holds the current Elasticsearch connection settings;
	// nil until WriteESConfigs is first called.
	esConfigs *ESConfigs
)

// ESConfigs describes how to reach the Elasticsearch backend.
type ESConfigs struct {
	Host  string // Elasticsearch host name or IP
	Port  string // Elasticsearch HTTP port
	Index string // index name prefix; Query searches "<Index>*"
}
|
||||
|
||||
func readESConfigs() *ESConfigs {
|
||||
mu.RLock()
|
||||
defer mu.RUnlock()
|
||||
|
||||
return esConfigs
|
||||
}
|
||||
|
||||
func (configs *ESConfigs) WriteESConfigs() {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
esConfigs = configs
|
||||
}
|
||||
|
||||
// Request is the top-level Elasticsearch search request body built by
// createQueryRequest.
type Request struct {
	From          int64         `json:"from"`                // pagination offset
	Size          int64         `json:"size"`                // page size; 0 for aggregation-only operations
	Sorts         []Sort        `json:"sort,omitempty"`      // sort clauses on the "time" field
	MainQuery     MainQuery     `json:"query"`               // bool/filter query tree
	Aggs          interface{}   `json:"aggs,omitempty"`      // StatisticsAggs or HistogramAggs, depending on operation
	MainHighLight MainHighLight `json:"highlight,omitempty"` // highlight config, only for plain queries
}

// Sort is a single sort clause keyed by the "time" field.
type Sort struct {
	Order Order `json:"time"`
}

// Order holds the sort direction, "asc" or "desc".
type Order struct {
	Order string `json:"order"`
}

// MainQuery wraps the outer bool query.
type MainQuery struct {
	MainBoolQuery MainBoolQuery `json:"bool"`
}

// MainBoolQuery holds the filter context of the outer bool query.
type MainBoolQuery struct {
	MainFilter MainFilter `json:"filter"`
}

// MainFilter wraps the inner bool query that carries all must clauses.
type MainFilter struct {
	FilterBoolQuery FilterBoolQuery `json:"bool"`
}

// FilterBoolQuery lists the must clauses (access filters, fuzzy matches
// and the time range) that every hit has to satisfy.
type FilterBoolQuery struct {
	Musts []interface{} `json:"must"`
}

// RangeQuery restricts hits to a time window.
type RangeQuery struct {
	RangeSpec RangeSpec `json:"range"`
}

// RangeSpec keys the range clause by the "time" field.
type RangeSpec struct {
	TimeRange TimeRange `json:"time"`
}

// TimeRange is an inclusive [Gte, Lte] window; either bound may be empty.
type TimeRange struct {
	Gte string `json:"gte,omitempty"`
	Lte string `json:"lte,omitempty"`
}

// BoolShouldMatchPhrase wraps a should clause used as one must entry.
type BoolShouldMatchPhrase struct {
	ShouldMatchPhrase ShouldMatchPhrase `json:"bool"`
}

// ShouldMatchPhrase is an OR over match_phrase clauses; with
// MinimumShouldMatch set to 1, at least one must match.
type ShouldMatchPhrase struct {
	Shoulds            []interface{} `json:"should"`
	MinimumShouldMatch int64         `json:"minimum_should_match"`
}

// MatchPhrase is an exact-phrase match on a single field.
type MatchPhrase struct {
	MatchPhrase interface{} `json:"match_phrase"`
}

// Match is a full-text (fuzzy) match on a single field.
type Match struct {
	Match interface{} `json:"match"`
}

// QueryWord carries the text to match.
type QueryWord struct {
	Word string `json:"query"`
}

// MainHighLight lists the fields to highlight in query results.
type MainHighLight struct {
	Fields []interface{} `json:"fields,omitempty"`
}

// LogHighLightField requests highlighting on the "log" field.
type LogHighLightField struct {
	FieldContent EmptyField `json:"log"`
}

// NamespaceHighLightField requests highlighting on the namespace keyword field.
type NamespaceHighLightField struct {
	FieldContent EmptyField `json:"kubernetes.namespace_name.keyword"`
}

// PodHighLightField requests highlighting on the pod keyword field.
type PodHighLightField struct {
	FieldContent EmptyField `json:"kubernetes.pod_name.keyword"`
}

// ContainerHighLightField requests highlighting on the container keyword field.
type ContainerHighLightField struct {
	FieldContent EmptyField `json:"kubernetes.container_name.keyword"`
}

// EmptyField serializes to an empty JSON object ({}), the per-field
// highlight config Elasticsearch expects when defaults are fine.
type EmptyField struct {
}

// StatisticsAggs is the aggregation tree for the statistics operation:
// a "Namespace" terms aggregation with a nested "Container" aggregation.
type StatisticsAggs struct {
	NamespaceAgg NamespaceAgg `json:"Namespace"`
}

// NamespaceAgg buckets hits by namespace and nests the container agg.
type NamespaceAgg struct {
	Terms         StatisticsAggTerm `json:"terms"`
	ContainerAggs ContainerAggs     `json:"aggs"`
}

// ContainerAggs wraps the nested "Container" terms aggregation.
type ContainerAggs struct {
	ContainerAgg ContainerAgg `json:"Container"`
}

// ContainerAgg buckets hits by container.
type ContainerAgg struct {
	Terms StatisticsAggTerm `json:"terms"`
}

// StatisticsAggTerm configures a terms aggregation: the field to bucket
// on and the maximum number of buckets to return.
type StatisticsAggTerm struct {
	Field string `json:"field"`
	Size  int64  `json:"size"`
}

// HistogramAggs is the aggregation tree for the histogram operation.
type HistogramAggs struct {
	HistogramAgg HistogramAgg `json:"histogram"`
}

// HistogramAgg wraps the date_histogram aggregation.
type HistogramAgg struct {
	DateHistogram DateHistogram `json:"date_histogram"`
}

// DateHistogram buckets hits on Field at the given Interval (e.g. "15m").
type DateHistogram struct {
	Field    string `json:"field"`
	Interval string `json:"interval"`
}
|
||||
|
||||
func createQueryRequest(param QueryParameters) (int, []byte, error) {
|
||||
var request Request
|
||||
var mainBoolQuery MainBoolQuery
|
||||
|
||||
if param.NamespaceFilled {
|
||||
var shouldMatchPhrase ShouldMatchPhrase
|
||||
if len(param.Namespaces) == 0 {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.namespace_name.key_word": QueryWord{""}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
} else {
|
||||
for _, namespace := range param.Namespaces {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.namespace_name.keyword": QueryWord{namespace}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
}
|
||||
}
|
||||
shouldMatchPhrase.MinimumShouldMatch = 1
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, BoolShouldMatchPhrase{shouldMatchPhrase})
|
||||
}
|
||||
if param.PodFilled {
|
||||
var shouldMatchPhrase ShouldMatchPhrase
|
||||
if len(param.Pods) == 0 {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.pod_name.key_word": QueryWord{""}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
} else {
|
||||
for _, pod := range param.Pods {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.pod_name.keyword": QueryWord{pod}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
}
|
||||
}
|
||||
shouldMatchPhrase.MinimumShouldMatch = 1
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, BoolShouldMatchPhrase{shouldMatchPhrase})
|
||||
}
|
||||
if param.ContainerFilled {
|
||||
var shouldMatchPhrase ShouldMatchPhrase
|
||||
if len(param.Containers) == 0 {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.container_name.key_word": QueryWord{""}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
} else {
|
||||
for _, container := range param.Containers {
|
||||
matchPhrase := MatchPhrase{map[string]interface{}{"kubernetes.container_name.keyword": QueryWord{container}}}
|
||||
shouldMatchPhrase.Shoulds = append(shouldMatchPhrase.Shoulds, matchPhrase)
|
||||
}
|
||||
}
|
||||
shouldMatchPhrase.MinimumShouldMatch = 1
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, BoolShouldMatchPhrase{shouldMatchPhrase})
|
||||
}
|
||||
|
||||
if param.NamespaceQuery != "" {
|
||||
match := Match{map[string]interface{}{"kubernetes.namespace_name": QueryWord{param.NamespaceQuery}}}
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, match)
|
||||
}
|
||||
if param.PodQuery != "" {
|
||||
match := Match{map[string]interface{}{"kubernetes.pod_name": QueryWord{param.PodQuery}}}
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, match)
|
||||
}
|
||||
if param.ContainerQuery != "" {
|
||||
match := Match{map[string]interface{}{"kubernetes.container_name": QueryWord{param.ContainerQuery}}}
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, match)
|
||||
}
|
||||
|
||||
if param.LogQuery != "" {
|
||||
match := Match{map[string]interface{}{"log": QueryWord{param.LogQuery}}}
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, match)
|
||||
}
|
||||
|
||||
rangeQuery := RangeQuery{RangeSpec{TimeRange{param.StartTime, param.EndTime}}}
|
||||
mainBoolQuery.MainFilter.FilterBoolQuery.Musts = append(mainBoolQuery.MainFilter.FilterBoolQuery.Musts, rangeQuery)
|
||||
|
||||
var operation int
|
||||
|
||||
if param.Operation == "statistics" {
|
||||
operation = OperationStatistics
|
||||
containerAggs := ContainerAggs{ContainerAgg{StatisticsAggTerm{"kubernetes.container_name.keyword", 2147483647}}}
|
||||
namespaceAgg := NamespaceAgg{StatisticsAggTerm{"kubernetes.namespace_name.keyword", 2147483647}, containerAggs}
|
||||
request.Aggs = StatisticsAggs{namespaceAgg}
|
||||
request.Size = 0
|
||||
} else if param.Operation == "histogram" {
|
||||
operation = OperationHistogram
|
||||
var interval string
|
||||
if param.Interval != "" {
|
||||
interval = param.Interval
|
||||
} else {
|
||||
interval = "15m"
|
||||
}
|
||||
param.Interval = interval
|
||||
request.Aggs = HistogramAggs{HistogramAgg{DateHistogram{"time", interval}}}
|
||||
request.Size = 0
|
||||
} else {
|
||||
operation = OperationQuery
|
||||
request.From = param.From
|
||||
request.Size = param.Size
|
||||
var order string
|
||||
if strings.Compare(strings.ToLower(param.Sort), "asc") == 0 {
|
||||
order = "asc"
|
||||
} else {
|
||||
order = "desc"
|
||||
}
|
||||
request.Sorts = append(request.Sorts, Sort{Order{order}})
|
||||
|
||||
var mainHighLight MainHighLight
|
||||
mainHighLight.Fields = append(mainHighLight.Fields, LogHighLightField{})
|
||||
mainHighLight.Fields = append(mainHighLight.Fields, NamespaceHighLightField{})
|
||||
mainHighLight.Fields = append(mainHighLight.Fields, PodHighLightField{})
|
||||
mainHighLight.Fields = append(mainHighLight.Fields, ContainerHighLightField{})
|
||||
request.MainHighLight = mainHighLight
|
||||
}
|
||||
|
||||
request.MainQuery = MainQuery{mainBoolQuery}
|
||||
|
||||
queryRequest, err := json.Marshal(request)
|
||||
|
||||
return operation, queryRequest, err
|
||||
}
|
||||
|
||||
// Response models the subset of an Elasticsearch search response that
// this client consumes.
type Response struct {
	Status       int             `json:"status"`              // non-zero when Elasticsearch reports an error
	Workspace    string          `json:"workspace,omitempty"` // pass-through workspace tag
	Shards       Shards          `json:"_shards"`
	Hits         Hits            `json:"hits"`
	Aggregations json.RawMessage `json:"aggregations"` // decoded later per operation type
}

// Shards reports how many shards served the request; a mismatch between
// Total and Successful means the result set may be incomplete.
type Shards struct {
	Total      int64 `json:"total"`
	Successful int64 `json:"successful"`
	Skipped    int64 `json:"skipped"`
	Failed     int64 `json:"failed"`
}

// Hits is the hit envelope: total match count plus the returned page.
type Hits struct {
	Total int64 `json:"total"`
	Hits  []Hit `json:"hits"`
}

// Hit is a single matching log document with its highlight fragments.
type Hit struct {
	Source    Source    `json:"_source"`
	HighLight HighLight `json:"highlight"`
}

// Source is the indexed log document.
type Source struct {
	Log        string     `json:"log"`
	Time       string     `json:"time"` // RFC3339 or epoch-millis string; see calcTimestamp
	Kubernetes Kubernetes `json:"kubernetes"`
}

// Kubernetes carries the pod metadata attached to each log line.
type Kubernetes struct {
	Namespace string `json:"namespace_name"`
	Pod       string `json:"pod_name"`
	Container string `json:"container_name"`
	Host      string `json:"host"`
}

// HighLight lists highlighted fragments per field, matching the fields
// requested in MainHighLight.
type HighLight struct {
	LogHighLights       []string `json:"log,omitempty"`
	NamespaceHighLights []string `json:"kubernetes.namespace_name.keyword,omitempty"`
	PodHighLights       []string `json:"kubernetes.pod_name.keyword,omitempty"`
	ContainerHighLights []string `json:"kubernetes.container_name.keyword,omitempty"`
}

// LogRecord is one log entry in the API response.
type LogRecord struct {
	Time      int64     `json:"time,omitempty"` // milliseconds since the Unix epoch
	Log       string    `json:"log,omitempty"`
	Namespace string    `json:"namespace,omitempty"`
	Pod       string    `json:"pod,omitempty"`
	Container string    `json:"container,omitempty"`
	Host      string    `json:"host,omitempty"`
	HighLight HighLight `json:"highlight,omitempty"`
}

// ReadResult is the paged result of a plain query operation.
type ReadResult struct {
	Total   int64       `json:"total"`
	From    int64       `json:"from"`
	Size    int64       `json:"size"`
	Records []LogRecord `json:"records,omitempty"`
}

// NamespaceAggregations mirrors the "Namespace" terms aggregation in the
// statistics response.
type NamespaceAggregations struct {
	NamespaceAggregation NamespaceAggregation `json:"Namespace"`
}

// NamespaceAggregation holds the namespace buckets.
type NamespaceAggregation struct {
	Namespaces []NamespaceStatistics `json:"buckets"`
}

// NamespaceStatistics is one namespace bucket with its nested container
// aggregation. The "Key" tag still picks up Elasticsearch's lowercase
// "key" because JSON field matching is case-insensitive.
type NamespaceStatistics struct {
	Namespace string `json:"Key"`
	Count     int64  `json:"doc_count"`

	ContainerAggregation ContainerAggregation `json:"Container"`
}

// ContainerAggregation holds the container buckets of one namespace.
type ContainerAggregation struct {
	Containers []ContainerStatistics `json:"buckets"`
}

// ContainerStatistics is one container bucket.
type ContainerStatistics struct {
	Container string `json:"Key"`
	Count     int64  `json:"doc_count"`
}

// NamespaceResult is the API-facing per-namespace statistic.
type NamespaceResult struct {
	Namespace  string            `json:"namespace"`
	Count      int64             `json:"count"`
	Containers []ContainerResult `json:"containers"`
}

// ContainerResult is the API-facing per-container statistic.
type ContainerResult struct {
	Container string `json:"container"`
	Count     int64  `json:"count"`
}

// StatisticsResult is the API-facing result of a statistics operation.
type StatisticsResult struct {
	Total      int64             `json:"total"`
	Namespaces []NamespaceResult `json:"namespaces"`
}

// HistogramAggregations mirrors the "histogram" date_histogram
// aggregation in the histogram response.
type HistogramAggregations struct {
	HistogramAggregation HistogramAggregation `json:"histogram"`
}

// HistogramAggregation holds the time buckets.
type HistogramAggregation struct {
	Histograms []HistogramStatistics `json:"buckets"`
}

// HistogramStatistics is one time bucket (key is epoch milliseconds).
type HistogramStatistics struct {
	Time  int64 `json:"key"`
	Count int64 `json:"doc_count"`
}

// HistogramRecord is the API-facing form of one time bucket.
type HistogramRecord struct {
	Time  int64 `json:"time"`
	Count int64 `json:"count"`
}

// HistogramResult is the API-facing result of a histogram operation.
type HistogramResult struct {
	Total      int64             `json:"total"`
	StartTime  int64             `json:"start_time"`
	EndTime    int64             `json:"end_time"`
	Interval   string            `json:"interval"`
	Histograms []HistogramRecord `json:"histograms"`
}

// QueryResult is the unified response envelope; exactly one of Read,
// Statistics or Histogram is set depending on the operation.
type QueryResult struct {
	Status     int               `json:"status,omitempty"` // HTTP-style status code
	Workspace  string            `json:"workspace,omitempty"`
	Read       *ReadResult       `json:"query,omitempty"`
	Statistics *StatisticsResult `json:"statistics,omitempty"`
	Histogram  *HistogramResult  `json:"histogram,omitempty"`
	Request    string            `json:"request,omitempty"`  // raw request, for debugging (normally unset)
	Response   string            `json:"response,omitempty"` // raw response, for debugging (normally unset)
}

// Operation codes derived from QueryParameters.Operation.
const (
	OperationQuery int = iota
	OperationStatistics
	OperationHistogram
)
|
||||
|
||||
// calcTimestamp converts a timestamp string to milliseconds since the
// Unix epoch. It accepts either an RFC3339 time or an integer number of
// epoch milliseconds; any other input yields 0.
func calcTimestamp(input string) int64 {
	if t, err := time.Parse(time.RFC3339, input); err == nil {
		return t.UnixNano() / 1000000
	}
	if millis, err := strconv.ParseInt(input, 10, 64); err == nil {
		return time.Unix(millis/1000, (millis%1000)*1000000).UnixNano() / 1000000
	}
	return 0
}
|
||||
|
||||
func parseQueryResult(operation int, param QueryParameters, body []byte, query []byte) *QueryResult {
|
||||
var queryResult QueryResult
|
||||
//queryResult.Request = string(query)
|
||||
//queryResult.Response = string(body)
|
||||
|
||||
var response Response
|
||||
err := jsonIter.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
//fmt.Println("Parse response error ", err.Error())
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return &queryResult
|
||||
}
|
||||
|
||||
if response.Status != 0 {
|
||||
//Elastic error, eg, es_rejected_execute_exception
|
||||
queryResult.Status = response.Status
|
||||
return &queryResult
|
||||
}
|
||||
|
||||
if response.Shards.Successful != response.Shards.Total {
|
||||
//Elastic some shards error
|
||||
queryResult.Status = http.StatusInternalServerError
|
||||
return &queryResult
|
||||
}
|
||||
|
||||
switch operation {
|
||||
case OperationQuery:
|
||||
var readResult ReadResult
|
||||
readResult.Total = response.Hits.Total
|
||||
readResult.From = param.From
|
||||
readResult.Size = param.Size
|
||||
for _, hit := range response.Hits.Hits {
|
||||
var logRecord LogRecord
|
||||
logRecord.Time = calcTimestamp(hit.Source.Time)
|
||||
logRecord.Log = hit.Source.Log
|
||||
logRecord.Namespace = hit.Source.Kubernetes.Namespace
|
||||
logRecord.Pod = hit.Source.Kubernetes.Pod
|
||||
logRecord.Container = hit.Source.Kubernetes.Container
|
||||
logRecord.Host = hit.Source.Kubernetes.Host
|
||||
logRecord.HighLight = hit.HighLight
|
||||
readResult.Records = append(readResult.Records, logRecord)
|
||||
}
|
||||
queryResult.Read = &readResult
|
||||
|
||||
case OperationStatistics:
|
||||
var statisticsResult StatisticsResult
|
||||
statisticsResult.Total = response.Hits.Total
|
||||
|
||||
var namespaceAggregations NamespaceAggregations
|
||||
jsonIter.Unmarshal(response.Aggregations, &namespaceAggregations)
|
||||
|
||||
for _, namespace := range namespaceAggregations.NamespaceAggregation.Namespaces {
|
||||
var namespaceResult NamespaceResult
|
||||
namespaceResult.Namespace = namespace.Namespace
|
||||
namespaceResult.Count = namespace.Count
|
||||
|
||||
for _, container := range namespace.ContainerAggregation.Containers {
|
||||
var containerResult ContainerResult
|
||||
containerResult.Container = container.Container
|
||||
containerResult.Count = container.Count
|
||||
namespaceResult.Containers = append(namespaceResult.Containers, containerResult)
|
||||
}
|
||||
|
||||
statisticsResult.Namespaces = append(statisticsResult.Namespaces, namespaceResult)
|
||||
}
|
||||
|
||||
queryResult.Statistics = &statisticsResult
|
||||
|
||||
case OperationHistogram:
|
||||
var histogramResult HistogramResult
|
||||
histogramResult.Total = response.Hits.Total
|
||||
histogramResult.StartTime = calcTimestamp(param.StartTime)
|
||||
histogramResult.EndTime = calcTimestamp(param.EndTime)
|
||||
histogramResult.Interval = param.Interval
|
||||
|
||||
var histogramAggregations HistogramAggregations
|
||||
jsonIter.Unmarshal(response.Aggregations, &histogramAggregations)
|
||||
for _, histogram := range histogramAggregations.HistogramAggregation.Histograms {
|
||||
var histogramRecord HistogramRecord
|
||||
histogramRecord.Time = histogram.Time
|
||||
histogramRecord.Count = histogram.Count
|
||||
|
||||
histogramResult.Histograms = append(histogramResult.Histograms, histogramRecord)
|
||||
}
|
||||
|
||||
queryResult.Histogram = &histogramResult
|
||||
}
|
||||
|
||||
queryResult.Status = http.StatusOK
|
||||
queryResult.Workspace = param.Workspace
|
||||
|
||||
return &queryResult
|
||||
}
|
||||
|
||||
// QueryParameters collects every filter and option for one log search.
type QueryParameters struct {
	// Access-control filters. When a *Filled flag is set, results are
	// restricted to the listed resources; a set flag with an empty list
	// produces a clause that matches nothing (access to zero resources).
	NamespaceFilled bool
	Namespaces      []string
	PodFilled       bool
	Pods            []string
	ContainerFilled bool
	Containers      []string

	// Fuzzy (full-text) search terms on resource names.
	NamespaceQuery string
	PodQuery       string
	ContainerQuery string

	// Workspace is echoed back in QueryResult.Workspace.
	Workspace string

	// Operation selects the query mode: "statistics", "histogram", or
	// anything else for a plain paged query.
	Operation string
	// LogQuery is a full-text search term on log content.
	LogQuery string
	// Interval is the date_histogram bucket size (e.g. "15m");
	// only used when Operation == "histogram".
	Interval string
	// StartTime/EndTime bound the search window; RFC3339 or epoch-millis
	// strings (see calcTimestamp).
	StartTime string
	EndTime   string
	// Sort is "asc" for oldest-first; any other value sorts newest-first.
	Sort string
	// From/Size paginate plain query results.
	From int64
	Size int64
}
|
||||
|
||||
func stubResult() *QueryResult {
|
||||
var queryResult QueryResult
|
||||
|
||||
queryResult.Status = http.StatusOK
|
||||
|
||||
return &queryResult
|
||||
}
|
||||
|
||||
func Query(param QueryParameters) *QueryResult {
|
||||
var queryResult *QueryResult
|
||||
|
||||
//queryResult = stubResult()
|
||||
//return queryResult
|
||||
|
||||
client := &http.Client{}
|
||||
|
||||
operation, query, err := createQueryRequest(param)
|
||||
if err != nil {
|
||||
//fmt.Println("Create query error ", err.Error())
|
||||
queryResult = new(QueryResult)
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return queryResult
|
||||
}
|
||||
|
||||
es := readESConfigs()
|
||||
if es == nil {
|
||||
queryResult = new(QueryResult)
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return queryResult
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("http://%s:%s/%s*/_search", es.Host, es.Port, es.Index)
|
||||
request, err := http.NewRequest("GET", url, bytes.NewBuffer(query))
|
||||
if err != nil {
|
||||
//fmt.Println("Create request error ", err.Error())
|
||||
queryResult = new(QueryResult)
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return queryResult
|
||||
}
|
||||
request.Header.Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
response, err := client.Do(request)
|
||||
if err != nil {
|
||||
//fmt.Println("Send request error ", err.Error())
|
||||
queryResult = new(QueryResult)
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return queryResult
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
//fmt.Println("Read response error ", err.Error())
|
||||
queryResult = new(QueryResult)
|
||||
queryResult.Status = http.StatusNotFound
|
||||
return queryResult
|
||||
}
|
||||
|
||||
queryResult = parseQueryResult(operation, param, body, query)
|
||||
|
||||
return queryResult
|
||||
}
|
||||
289
pkg/simple/client/fluentbit/fluentbitcrdclient.go
Normal file
289
pkg/simple/client/fluentbit/fluentbitcrdclient.go
Normal file
@@ -0,0 +1,289 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package fluentbitclient
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Identity of the fluentbits custom resource definition.
const (
	CRDPlural   string = "fluentbits"              // resource plural used in REST paths
	CRDGroup    string = "logging.kubesphere.io"   // API group
	CRDVersion  string = "v1alpha1"                // API version
	FullCRDName string = CRDPlural + "." + CRDGroup // CRD object name: "fluentbits.logging.kubesphere.io"
)
|
||||
|
||||
// FluentBitList is the list type for FluentBit custom resources.
// Auto generated by the sdk.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FluentBitList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []FluentBit `json:"items"`
}

// FluentBit is the fluent-bit logging agent custom resource.
// Auto generated by the sdk.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FluentBit struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`
	Spec              FluentBitSpec   `json:"spec"`
	Status            FluentBitStatus `json:"status,omitempty"`
}
|
||||
|
||||
// FluentBitSpec holds the spec for the operator. Each field is a list of
// plugin configurations, presumably mapping to the corresponding
// fluent-bit configuration sections — TODO confirm against the operator.
type FluentBitSpec struct {
	Service  []Plugin `json:"service"`
	Input    []Plugin `json:"input"`
	Filter   []Plugin `json:"filter"`
	Output   []Plugin `json:"output"`
	Settings []Plugin `json:"settings"`
}

// FluentBitStatus holds the status info for the operator.
// Currently empty.
type FluentBitStatus struct {
	// Fill me
}

// Plugin struct for fluent-bit plugins: a typed, named plugin with its
// parameter list.
type Plugin struct {
	Type       string      `json:"type"`
	Name       string      `json:"name"`
	Parameters []Parameter `json:"parameters"`
}

// OutputPlugin is a Plugin extended with bookkeeping fields for
// fluent-bit output plugins (identity, enable flag, last update time).
type OutputPlugin struct {
	Plugin
	Id         uint      `json:"id"`
	Enable     bool      `json:"enable"`
	Updatetime time.Time `json:"updatetime"`
}

// Parameter generic parameter type to handle values from different
// sources: either a literal Value or an indirect ValueFrom reference.
type Parameter struct {
	Name      string     `json:"name"`
	ValueFrom *ValueFrom `json:"valueFrom,omitempty"`
	Value     string     `json:"value"`
}

// ValueFrom generic type to determine value origin; currently only
// Kubernetes secrets are supported.
type ValueFrom struct {
	SecretKeyRef KubernetesSecret `json:"secretKeyRef"`
}

// KubernetesSecret identifies one key of a namespaced secret.
type KubernetesSecret struct {
	Name      string `json:"name"`
	Key       string `json:"key"`
	Namespace string `json:"namespace"`
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FluentBit) DeepCopyInto(out *FluentBit) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
out.Status = in.Status
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBit.
|
||||
func (in *FluentBit) DeepCopy() *FluentBit {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FluentBit)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FluentBit) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Items are copied element-by-element via FluentBit.DeepCopyInto so the
// copy owns its own backing array.
func (in *FluentBitList) DeepCopyInto(out *FluentBitList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]FluentBit, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitList.
// Returns nil when the receiver is nil.
func (in *FluentBitList) DeepCopy() *FluentBitList {
	if in == nil {
		return nil
	}
	out := new(FluentBitList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Returns an untyped nil for a nil receiver so callers' err == nil style
// checks behave as expected.
func (in *FluentBitList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FluentBitSpec) DeepCopyInto(out *FluentBitSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitSpec.
|
||||
func (in *FluentBitSpec) DeepCopy() *FluentBitSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FluentBitSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// FluentBitStatus currently has no fields, so a plain value copy suffices.
func (in *FluentBitStatus) DeepCopyInto(out *FluentBitStatus) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentBitStatus.
// Returns nil when the receiver is nil.
func (in *FluentBitStatus) DeepCopy() *FluentBitStatus {
	if in == nil {
		return nil
	}
	out := new(FluentBitStatus)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
// Create a Rest client with the new CRD Schema.

// SchemeGroupVersion identifies the group/version the fluentbits CRD
// types are registered under.
var SchemeGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}

// addKnownTypes registers the FluentBit types (and the standard meta
// types) with the given scheme under SchemeGroupVersion. It is wired into
// a SchemeBuilder by NewFluentbitCRDClient.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&FluentBit{},
		&FluentBitList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
|
||||
|
||||
// NewFluentbitCRDClient builds a REST client for the fluentbits custom
// resource from the given rest.Config, along with the runtime.Scheme it
// registered the CRD types in. The input config is not mutated; a copy is
// adjusted to target the CRD's group/version under /apis with JSON
// content negotiation.
func NewFluentbitCRDClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	SchemeBuilder := runtime.NewSchemeBuilder(addKnownTypes)
	if err := SchemeBuilder.AddToScheme(scheme); err != nil {
		return nil, nil, err
	}
	// Work on a shallow copy so the caller's config stays untouched.
	config := *cfg
	config.GroupVersion = &SchemeGroupVersion
	config.APIPath = "/apis"
	config.ContentType = runtime.ContentTypeJSON
	config.NegotiatedSerializer = serializer.DirectCodecFactory{
		CodecFactory: serializer.NewCodecFactory(scheme)}

	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, nil, err
	}
	return client, scheme, nil
}
|
||||
|
||||
// This file implement all the (CRUD) client methods we need to access our CRD object.

// CrdClient wraps a REST client with the namespace, resource plural and
// parameter codec needed to perform CRUD calls on fluentbits resources.
func CrdClient(cl *rest.RESTClient, scheme *runtime.Scheme, namespace string) *crdclient {
	return &crdclient{cl: cl, ns: namespace, plural: CRDPlural,
		codec: runtime.NewParameterCodec(scheme)}
}

// crdclient is a namespaced CRUD client for the fluentbits custom resource.
type crdclient struct {
	cl     *rest.RESTClient      // configured CRD REST client (see NewFluentbitCRDClient)
	ns     string                // namespace all operations are scoped to
	plural string                // resource plural in REST paths ("fluentbits")
	codec  runtime.ParameterCodec // encodes ListOptions as query parameters
}
|
||||
|
||||
// Create POSTs a new FluentBit resource in the client's namespace and
// returns the server's stored version of the object.
func (f *crdclient) Create(obj *FluentBit) (*FluentBit, error) {
	var result FluentBit
	err := f.cl.Post().
		Namespace(f.ns).Resource(f.plural).
		Body(obj).Do().Into(&result)
	return &result, err
}
|
||||
|
||||
// Update PUTs the given object over the named FluentBit resource and
// returns the server's stored version of the object.
func (f *crdclient) Update(name string, obj *FluentBit) (*FluentBit, error) {
	var result FluentBit
	err := f.cl.Put().
		Namespace(f.ns).Resource(f.plural).
		Name(name).Body(obj).Do().Into(&result)
	return &result, err
}
|
||||
|
||||
// Delete removes the named FluentBit resource using the given delete
// options, returning any server error.
func (f *crdclient) Delete(name string, options *metav1.DeleteOptions) error {
	return f.cl.Delete().
		Namespace(f.ns).Resource(f.plural).
		Name(name).Body(options).Do().
		Error()
}
|
||||
|
||||
// Get fetches the named FluentBit resource from the client's namespace.
func (f *crdclient) Get(name string) (*FluentBit, error) {
	var result FluentBit
	err := f.cl.Get().
		Namespace(f.ns).Resource(f.plural).
		Name(name).Do().Into(&result)
	return &result, err
}
|
||||
|
||||
// List fetches all FluentBit resources in the client's namespace,
// encoding opts (label/field selectors, etc.) as query parameters via
// the client's codec.
func (f *crdclient) List(opts metav1.ListOptions) (*FluentBitList, error) {
	var result FluentBitList
	err := f.cl.Get().
		Namespace(f.ns).Resource(f.plural).
		VersionedParams(&opts, f.codec).
		Do().Into(&result)
	return &result, err
}
|
||||
|
||||
// Create a new List watch for our TPR.
// NewListWatch returns a cache.ListWatch over all FluentBit resources in
// the client's namespace, for use with informers/controllers.
func (f *crdclient) NewListWatch() *cache.ListWatch {
	return cache.NewListWatchFromClient(f.cl, f.plural, f.ns, fields.Everything())
}
|
||||
|
||||
// return rest config, if path not specified assume in cluster config
|
||||
func GetClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig != "" {
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
@@ -18,7 +18,6 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -26,54 +25,58 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
||||
"github.com/emicklei/go-restful"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultQueryStep = "10m"
|
||||
DefaultQueryTimeout = "10s"
|
||||
RangeQueryType = "query_range?"
|
||||
DefaultQueryType = "query?"
|
||||
DefaultScheme = "http"
|
||||
DefaultPrometheusPort = "9090"
|
||||
PrometheusApiPath = "/api/v1/"
|
||||
DefaultQueryStep = "10m"
|
||||
DefaultQueryTimeout = "10s"
|
||||
RangeQueryType = "query_range?"
|
||||
DefaultQueryType = "query?"
|
||||
PrometheusAPIServerEnv = "PROMETHEUS_API_SERVER"
|
||||
)
|
||||
|
||||
var (
|
||||
PrometheusAPIEndpoint string
|
||||
)
|
||||
var PrometheusAPIServer = "prometheus-k8s.kubesphere-monitoring-system.svc"
|
||||
var PrometheusAPIEndpoint string
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&PrometheusAPIEndpoint, "prometheus-endpoint", "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090/api/v1/", "prometheus api endpoint")
|
||||
if env := os.Getenv(PrometheusAPIServerEnv); env != "" {
|
||||
PrometheusAPIServer = env
|
||||
}
|
||||
PrometheusAPIEndpoint = DefaultScheme + "://" + PrometheusAPIServer + ":" + DefaultPrometheusPort + PrometheusApiPath
|
||||
}
|
||||
|
||||
type MonitoringRequestParams struct {
|
||||
Params url.Values
|
||||
QueryType string
|
||||
SortMetricName string
|
||||
SortType string
|
||||
PageNum string
|
||||
LimitNum string
|
||||
Tp string
|
||||
MetricsFilter string
|
||||
NodesFilter string
|
||||
WsFilter string
|
||||
NsFilter string
|
||||
PodsFilter string
|
||||
ContainersFilter string
|
||||
MetricsName string
|
||||
WorkloadName string
|
||||
WlFilter string
|
||||
NodeId string
|
||||
WsName string
|
||||
NsName string
|
||||
PodName string
|
||||
ContainerName string
|
||||
WorkloadKind string
|
||||
Params url.Values
|
||||
QueryType string
|
||||
SortMetricName string
|
||||
SortType string
|
||||
PageNum string
|
||||
LimitNum string
|
||||
Tp string
|
||||
MetricsFilter string
|
||||
ResourcesFilter string
|
||||
MetricsName string
|
||||
WorkloadName string
|
||||
NodeId string
|
||||
WsName string
|
||||
NsName string
|
||||
PodName string
|
||||
ContainerName string
|
||||
WorkloadKind string
|
||||
}
|
||||
|
||||
var client = &http.Client{}
|
||||
|
||||
func SendMonitoringRequest(queryType string, params string) string {
|
||||
epurl := PrometheusAPIEndpoint + queryType + params
|
||||
|
||||
response, err := http.DefaultClient.Get(epurl)
|
||||
response, err := client.Get(epurl)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
} else {
|
||||
@@ -103,15 +106,10 @@ func ParseMonitoringRequestParams(request *restful.Request) *MonitoringRequestPa
|
||||
tp := strings.Trim(request.QueryParameter("type"), " ")
|
||||
|
||||
metricsFilter := strings.Trim(request.QueryParameter("metrics_filter"), " ")
|
||||
nodesFilter := strings.Trim(request.QueryParameter("nodes_filter"), " ")
|
||||
wsFilter := strings.Trim(request.QueryParameter("workspaces_filter"), " ")
|
||||
nsFilter := strings.Trim(request.QueryParameter("namespaces_filter"), " ")
|
||||
wlFilter := strings.Trim(request.QueryParameter("workloads_filter"), " ")
|
||||
podsFilter := strings.Trim(request.QueryParameter("pods_filter"), " ")
|
||||
containersFilter := strings.Trim(request.QueryParameter("containers_filter"), " ")
|
||||
resourcesFilter := strings.Trim(request.QueryParameter("resources_filter"), " ")
|
||||
|
||||
metricsName := strings.Trim(request.QueryParameter("metrics_name"), " ")
|
||||
workloadName := strings.Trim(request.QueryParameter("workload_name"), " ")
|
||||
workloadName := strings.Trim(request.PathParameter("workload"), " ")
|
||||
|
||||
nodeId := strings.Trim(request.PathParameter("node"), " ")
|
||||
wsName := strings.Trim(request.PathParameter("workspace"), " ")
|
||||
@@ -121,26 +119,21 @@ func ParseMonitoringRequestParams(request *restful.Request) *MonitoringRequestPa
|
||||
workloadKind := strings.Trim(request.PathParameter("workload_kind"), " ")
|
||||
|
||||
var requestParams = MonitoringRequestParams{
|
||||
SortMetricName: sortMetricName,
|
||||
SortType: sortType,
|
||||
PageNum: pageNum,
|
||||
LimitNum: limitNum,
|
||||
Tp: tp,
|
||||
MetricsFilter: metricsFilter,
|
||||
NodesFilter: nodesFilter,
|
||||
WsFilter: wsFilter,
|
||||
NsFilter: nsFilter,
|
||||
PodsFilter: podsFilter,
|
||||
ContainersFilter: containersFilter,
|
||||
MetricsName: metricsName,
|
||||
WorkloadName: workloadName,
|
||||
WlFilter: wlFilter,
|
||||
NodeId: nodeId,
|
||||
WsName: wsName,
|
||||
NsName: nsName,
|
||||
PodName: podName,
|
||||
ContainerName: containerName,
|
||||
WorkloadKind: workloadKind,
|
||||
SortMetricName: sortMetricName,
|
||||
SortType: sortType,
|
||||
PageNum: pageNum,
|
||||
LimitNum: limitNum,
|
||||
Tp: tp,
|
||||
MetricsFilter: metricsFilter,
|
||||
ResourcesFilter: resourcesFilter,
|
||||
MetricsName: metricsName,
|
||||
WorkloadName: workloadName,
|
||||
NodeId: nodeId,
|
||||
WsName: wsName,
|
||||
NsName: nsName,
|
||||
PodName: podName,
|
||||
ContainerName: containerName,
|
||||
WorkloadKind: workloadKind,
|
||||
}
|
||||
|
||||
if timeout == "" {
|
||||
|
||||
Reference in New Issue
Block a user