feat: kubesphere 4.0 (#6115)
* feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> * feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> --------- Signed-off-by: ci-bot <ci-bot@kubesphere.io> Co-authored-by: ks-ci-bot <ks-ci-bot@example.com> Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
committed by
GitHub
parent
b5015ec7b9
commit
447a51f08b
@@ -1,899 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
|
||||
promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
coreinformersv1 "k8s.io/client-go/informers/core/v1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/alerting/rules"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
|
||||
)
|
||||
|
||||
const (
	// rulerNamespace is the namespace hosting the in-cluster prometheus and
	// thanos ruler workloads; cluster-level rules are persisted there.
	rulerNamespace = constants.KubeSphereMonitoringNamespace
)
|
||||
|
||||
// Operator contains all operations to alerting rules. The operations may involve manipulations of prometheusrule
// custom resources where the rules are persisted, and querying the rules state from the prometheus endpoint and
// the thanos ruler endpoint.
// For the following apis, if namespace is empty, the operations target cluster-level alerting rules;
// otherwise they target only the rules of the specified namespace.
// All custom rules are configured for thanos ruler, so operations on custom alerting rules cannot be done
// if thanos ruler is not enabled.
type Operator interface {
	// ListCustomAlertingRules lists the custom alerting rules.
	ListCustomAlertingRules(ctx context.Context, namespace string,
		queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error)
	// ListCustomRulesAlerts lists the alerts of the custom alerting rules.
	ListCustomRulesAlerts(ctx context.Context, namespace string,
		queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error)
	// GetCustomAlertingRule gets the custom alerting rule with the given name.
	GetCustomAlertingRule(ctx context.Context, namespace, ruleName string) (*v2alpha1.GettableAlertingRule, error)
	// ListCustomRuleAlerts lists the alerts of the custom alerting rule with the given name.
	ListCustomRuleAlerts(ctx context.Context, namespace, ruleName string) (*v2alpha1.AlertList, error)
	// CreateCustomAlertingRule creates a custom alerting rule.
	CreateCustomAlertingRule(ctx context.Context, namespace string, rule *v2alpha1.PostableAlertingRule) error
	// UpdateCustomAlertingRule updates the custom alerting rule with the given name.
	UpdateCustomAlertingRule(ctx context.Context, namespace, ruleName string, rule *v2alpha1.PostableAlertingRule) error
	// DeleteCustomAlertingRule deletes the custom alerting rule with the given name.
	DeleteCustomAlertingRule(ctx context.Context, namespace, ruleName string) error

	// CreateOrUpdateCustomAlertingRules creates or updates custom alerting rules in bulk.
	CreateOrUpdateCustomAlertingRules(ctx context.Context, namespace string, rs []*v2alpha1.PostableAlertingRule) (*v2alpha1.BulkResponse, error)
	// DeleteCustomAlertingRules deletes a batch of custom alerting rules.
	DeleteCustomAlertingRules(ctx context.Context, namespace string, ruleNames []string) (*v2alpha1.BulkResponse, error)

	// ListBuiltinAlertingRules lists the builtin(non-custom) alerting rules.
	ListBuiltinAlertingRules(ctx context.Context,
		queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error)
	// ListBuiltinRulesAlerts lists the alerts of the builtin(non-custom) alerting rules.
	ListBuiltinRulesAlerts(ctx context.Context,
		queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error)
	// GetBuiltinAlertingRule gets the builtin(non-custom) alerting rule with the given id.
	GetBuiltinAlertingRule(ctx context.Context, ruleId string) (*v2alpha1.GettableAlertingRule, error)
	// ListBuiltinRuleAlerts lists the alerts of the builtin(non-custom) alerting rule with the given id.
	ListBuiltinRuleAlerts(ctx context.Context, ruleId string) (*v2alpha1.AlertList, error)
}
|
||||
|
||||
// NewOperator assembles an Operator backed by the shared informer factories,
// the prometheus-operator resource client, and the rule endpoint client.
// option.ThanosRuleResourceLabels is a comma-separated "key=value" list parsed
// into labels attached to every custom rule resource managed for thanos ruler;
// malformed pairs (not exactly one '=') are silently skipped.
func NewOperator(informers informers.InformerFactory,
	promResourceClient promresourcesclient.Interface, ruleClient alerting.RuleClient,
	option *alerting.Options) Operator {
	o := operator{
		namespaceInformer: informers.KubernetesSharedInformerFactory().Core().V1().Namespaces(),

		promResourceClient: promResourceClient,

		prometheusInformer:   informers.PrometheusSharedInformerFactory().Monitoring().V1().Prometheuses(),
		thanosRulerInformer:  informers.PrometheusSharedInformerFactory().Monitoring().V1().ThanosRulers(),
		ruleResourceInformer: informers.PrometheusSharedInformerFactory().Monitoring().V1().PrometheusRules(),

		ruleClient: ruleClient,

		thanosRuleResourceLabels: make(map[string]string),
	}

	// The cache indexes persisted rules by resource for fast lookups below.
	o.resourceRuleCache = rules.NewRuleCache(o.ruleResourceInformer)

	if option != nil && len(option.ThanosRuleResourceLabels) != 0 {
		lblStrings := strings.Split(option.ThanosRuleResourceLabels, ",")
		for _, lblString := range lblStrings {
			lbl := strings.Split(strings.TrimSpace(lblString), "=")
			if len(lbl) == 2 {
				o.thanosRuleResourceLabels[lbl[0]] = lbl[1]
			}
		}
	}

	return &o
}
|
||||
|
||||
// operator is the default Operator implementation. It persists rules in
// prometheusrule custom resources and reads live rule state from the
// prometheus / thanos ruler endpoints via ruleClient.
type operator struct {
	// ruleClient queries rule state from the prometheus and thanos ruler endpoints.
	ruleClient alerting.RuleClient

	// promResourceClient mutates prometheusrule custom resources.
	promResourceClient promresourcesclient.Interface

	prometheusInformer   prominformersv1.PrometheusInformer
	thanosRulerInformer  prominformersv1.ThanosRulerInformer
	ruleResourceInformer prominformersv1.PrometheusRuleInformer

	namespaceInformer coreinformersv1.NamespaceInformer

	// resourceRuleCache indexes the rules persisted in prometheusrule resources.
	resourceRuleCache *rules.RuleCache

	// thanosRuleResourceLabels are attached to every custom rule resource
	// managed for thanos ruler (parsed from options in NewOperator).
	thanosRuleResourceLabels map[string]string
}
|
||||
|
||||
func (o *operator) ListCustomAlertingRules(ctx context.Context, namespace string,
|
||||
queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error) {
|
||||
|
||||
var level v2alpha1.RuleLevel
|
||||
if namespace == "" {
|
||||
namespace = rulerNamespace
|
||||
level = v2alpha1.RuleLevelCluster
|
||||
} else {
|
||||
level = v2alpha1.RuleLevelNamespace
|
||||
}
|
||||
|
||||
ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pageAlertingRules(alertingRules, queryParams), nil
|
||||
}
|
||||
|
||||
func (o *operator) ListCustomRulesAlerts(ctx context.Context, namespace string,
|
||||
queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error) {
|
||||
|
||||
var level v2alpha1.RuleLevel
|
||||
if namespace == "" {
|
||||
namespace = rulerNamespace
|
||||
level = v2alpha1.RuleLevelCluster
|
||||
} else {
|
||||
level = v2alpha1.RuleLevelNamespace
|
||||
}
|
||||
|
||||
ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
alertingRules, err := o.listCustomAlertingRules(ctx, ruleNamespace, level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pageAlerts(alertingRules, queryParams), nil
|
||||
}
|
||||
|
||||
func (o *operator) GetCustomAlertingRule(ctx context.Context, namespace, ruleName string) (
|
||||
*v2alpha1.GettableAlertingRule, error) {
|
||||
|
||||
var level v2alpha1.RuleLevel
|
||||
if namespace == "" {
|
||||
namespace = rulerNamespace
|
||||
level = v2alpha1.RuleLevelCluster
|
||||
} else {
|
||||
level = v2alpha1.RuleLevelNamespace
|
||||
}
|
||||
|
||||
ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return o.getCustomAlertingRule(ctx, ruleNamespace, ruleName, level)
|
||||
}
|
||||
|
||||
func (o *operator) ListCustomRuleAlerts(ctx context.Context, namespace, ruleName string) (
|
||||
*v2alpha1.AlertList, error) {
|
||||
|
||||
rule, err := o.GetCustomAlertingRule(ctx, namespace, ruleName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rule == nil {
|
||||
return nil, v2alpha1.ErrAlertingRuleNotFound
|
||||
}
|
||||
return &v2alpha1.AlertList{
|
||||
Total: len(rule.Alerts),
|
||||
Items: rule.Alerts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (o *operator) ListBuiltinAlertingRules(ctx context.Context,
|
||||
queryParams *v2alpha1.AlertingRuleQueryParams) (*v2alpha1.GettableAlertingRuleList, error) {
|
||||
|
||||
alertingRules, err := o.listBuiltinAlertingRules(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pageAlertingRules(alertingRules, queryParams), nil
|
||||
}
|
||||
|
||||
func (o *operator) ListBuiltinRulesAlerts(ctx context.Context,
|
||||
queryParams *v2alpha1.AlertQueryParams) (*v2alpha1.AlertList, error) {
|
||||
alertingRules, err := o.listBuiltinAlertingRules(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pageAlerts(alertingRules, queryParams), nil
|
||||
}
|
||||
|
||||
func (o *operator) GetBuiltinAlertingRule(ctx context.Context, ruleId string) (
|
||||
*v2alpha1.GettableAlertingRule, error) {
|
||||
|
||||
return o.getBuiltinAlertingRule(ctx, ruleId)
|
||||
}
|
||||
|
||||
func (o *operator) ListBuiltinRuleAlerts(ctx context.Context, ruleId string) (*v2alpha1.AlertList, error) {
|
||||
rule, err := o.getBuiltinAlertingRule(ctx, ruleId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rule == nil {
|
||||
return nil, v2alpha1.ErrAlertingRuleNotFound
|
||||
}
|
||||
return &v2alpha1.AlertList{
|
||||
Total: len(rule.Alerts),
|
||||
Items: rule.Alerts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// listCustomAlertingRules loads all custom rules of the given level persisted
// in ruleNamespace and joins them with their live state fetched from the
// thanos ruler endpoint. Returns (nil, nil) when no rules are persisted.
func (o *operator) listCustomAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
	level v2alpha1.RuleLevel) ([]*v2alpha1.GettableAlertingRule, error) {

	ruler, err := o.getThanosRuler()
	if err != nil {
		return nil, err
	}
	if ruler == nil {
		// custom rules are hosted by thanos ruler only
		return nil, v2alpha1.ErrThanosRulerNotEnabled
	}

	// select only the persisted rules matching the requested level
	resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace,
		labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)}))
	if err != nil {
		return nil, err
	}

	if len(resourceRulesMap) == 0 {
		return nil, nil
	}

	// live evaluation state from the thanos ruler endpoint
	ruleGroups, err := o.ruleClient.ThanosRules(ctx)
	if err != nil {
		return nil, err
	}

	return rules.GetAlertingRulesStatus(ruleNamespace.Name, &rules.ResourceRuleChunk{
		ResourceRulesMap: resourceRulesMap,
		Custom:           true,
		Level:            level,
	}, ruleGroups, ruler.ExternalLabels())
}
|
||||
|
||||
// getCustomAlertingRule loads the persisted custom rule named ruleName (at the
// given level, in ruleNamespace) and joins it with its live state fetched from
// the thanos ruler endpoint.
func (o *operator) getCustomAlertingRule(ctx context.Context, ruleNamespace *corev1.Namespace,
	ruleName string, level v2alpha1.RuleLevel) (*v2alpha1.GettableAlertingRule, error) {

	ruler, err := o.getThanosRuler()
	if err != nil {
		return nil, err
	}
	if ruler == nil {
		// custom rules are hosted by thanos ruler only
		return nil, v2alpha1.ErrThanosRulerNotEnabled
	}

	resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace,
		labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)}), ruleName)
	if err != nil {
		return nil, err
	}
	if resourceRule == nil {
		return nil, v2alpha1.ErrAlertingRuleNotFound
	}

	// live evaluation state from the thanos ruler endpoint
	ruleGroups, err := o.ruleClient.ThanosRules(ctx)
	if err != nil {
		return nil, err
	}

	return rules.GetAlertingRuleStatus(ruleNamespace.Name, &rules.ResourceRule{
		ResourceRuleItem: *resourceRule,
		Custom:           true,
		Level:            level,
	}, ruleGroups, ruler.ExternalLabels())
}
|
||||
|
||||
// listBuiltinAlertingRules lists the builtin (non-custom) rules. When an
// in-cluster prometheus exists, the persisted rules are joined with their live
// endpoint state; otherwise (out-cluster prometheus) the rules are parsed
// directly from the endpoint response.
func (o *operator) listBuiltinAlertingRules(ctx context.Context) (
	[]*v2alpha1.GettableAlertingRule, error) {

	ruler, err := o.getPrometheusRuler()
	if err != nil {
		return nil, err
	}

	ruleGroups, err := o.ruleClient.PrometheusRules(ctx)
	if err != nil {
		return nil, err
	}

	if ruler == nil {
		// for out-cluster prometheus: no resources to join, accept every rule
		return rules.ParseAlertingRules(ruleGroups, false, v2alpha1.RuleLevelCluster,
			func(group, id string, rule *alerting.AlertingRule) bool {
				return true
			})
	}

	// builtin rules are persisted in the ruler namespace
	namespace := rulerNamespace
	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return nil, err
	}

	// nil selector: take all persisted rules, not only custom-labeled ones
	resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace, nil)
	if err != nil {
		return nil, err
	}

	return rules.GetAlertingRulesStatus(ruleNamespace.Name, &rules.ResourceRuleChunk{
		ResourceRulesMap: resourceRulesMap,
		Custom:           false,
		Level:            v2alpha1.RuleLevelCluster,
	}, ruleGroups, ruler.ExternalLabels())
}
|
||||
|
||||
// getBuiltinAlertingRule gets the builtin rule identified by ruleId. For an
// out-cluster prometheus the endpoint response is filtered by id and the
// first match (in AlertingRuleIdCompare order) is returned; for an in-cluster
// prometheus the persisted rule is joined with its live endpoint state.
func (o *operator) getBuiltinAlertingRule(ctx context.Context, ruleId string) (*v2alpha1.GettableAlertingRule, error) {

	ruler, err := o.getPrometheusRuler()
	if err != nil {
		return nil, err
	}

	ruleGroups, err := o.ruleClient.PrometheusRules(ctx)
	if err != nil {
		return nil, err
	}

	if ruler == nil {
		// for out-cluster prometheus: filter the endpoint rules by id
		alertingRules, err := rules.ParseAlertingRules(ruleGroups, false, v2alpha1.RuleLevelCluster,
			func(group, id string, rule *alerting.AlertingRule) bool {
				return ruleId == id
			})
		if err != nil {
			return nil, err
		}
		if len(alertingRules) == 0 {
			return nil, v2alpha1.ErrAlertingRuleNotFound
		}
		// deterministic pick when more than one rule shares the id
		sort.Slice(alertingRules, func(i, j int) bool {
			return v2alpha1.AlertingRuleIdCompare(alertingRules[i].Id, alertingRules[j].Id)
		})
		return alertingRules[0], nil
	}

	// builtin rules are persisted in the ruler namespace
	namespace := rulerNamespace
	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return nil, err
	}

	// nil selector: builtin rules carry no custom-level label
	resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, nil, ruleId)
	if err != nil {
		return nil, err
	}

	if resourceRule == nil {
		return nil, v2alpha1.ErrAlertingRuleNotFound
	}

	return rules.GetAlertingRuleStatus(ruleNamespace.Name, &rules.ResourceRule{
		ResourceRuleItem: *resourceRule,
		Custom:           false,
		Level:            v2alpha1.RuleLevelCluster,
	}, ruleGroups, ruler.ExternalLabels())
}
|
||||
|
||||
// CreateCustomAlertingRule persists a new custom alerting rule for the thanos
// ruler. For namespace-level rules the query expression is rewritten to be
// scoped to the namespace. Fails with ErrAlertingRuleAlreadyExists when a rule
// of the same name and level already exists.
func (o *operator) CreateCustomAlertingRule(ctx context.Context, namespace string,
	rule *v2alpha1.PostableAlertingRule) error {
	ruler, err := o.getThanosRuler()
	if err != nil {
		return err
	}
	if ruler == nil {
		return v2alpha1.ErrThanosRulerNotEnabled
	}

	var (
		level              v2alpha1.RuleLevel
		ruleResourceLabels = make(map[string]string)
	)
	// copy so the operator's base label set is never mutated
	for k, v := range o.thanosRuleResourceLabels {
		ruleResourceLabels[k] = v
	}
	if namespace == "" {
		namespace = rulerNamespace
		level = v2alpha1.RuleLevelCluster
	} else {
		level = v2alpha1.RuleLevelNamespace
		// scope the expression to the namespace so the rule cannot match
		// other namespaces' series
		expr, err := rules.InjectExprNamespaceLabel(rule.Query, namespace)
		if err != nil {
			return err
		}
		rule.Query = expr
	}
	ruleResourceLabels[rules.CustomRuleResourceLabelKeyLevel] = string(level)

	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return err
	}

	extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)})
	resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, extraRuleResourceSelector, rule.Name)
	if err != nil {
		return err
	}
	if resourceRule != nil {
		return v2alpha1.ErrAlertingRuleAlreadyExists
	}

	setRuleUpdateTime(rule, time.Now())

	respItems, err := ruler.AddAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector,
		ruleResourceLabels, &rules.RuleWithGroup{Rule: *parseToPrometheusRule(rule)})
	if err != nil {
		return err
	}
	// surface the first per-item failure as the call's error
	for _, item := range respItems {
		if item.Status == v2alpha1.StatusError {
			if item.ErrorType == v2alpha1.ErrNotFound {
				return v2alpha1.ErrAlertingRuleNotFound
			}
			return item.Error
		}
	}
	return nil
}
|
||||
|
||||
// UpdateCustomAlertingRule replaces the custom alerting rule named name with
// the posted rule (the posted rule's Name is overwritten with name). For
// namespace-level rules the query expression is rewritten to be scoped to the
// namespace. Fails with ErrAlertingRuleNotFound when the rule does not exist.
func (o *operator) UpdateCustomAlertingRule(ctx context.Context, namespace, name string,
	rule *v2alpha1.PostableAlertingRule) error {

	// the path parameter wins over whatever name is in the body
	rule.Name = name

	ruler, err := o.getThanosRuler()
	if err != nil {
		return err
	}
	if ruler == nil {
		return v2alpha1.ErrThanosRulerNotEnabled
	}

	var (
		level              v2alpha1.RuleLevel
		ruleResourceLabels = make(map[string]string)
	)
	// copy so the operator's base label set is never mutated
	for k, v := range o.thanosRuleResourceLabels {
		ruleResourceLabels[k] = v
	}
	if namespace == "" {
		namespace = rulerNamespace
		level = v2alpha1.RuleLevelCluster
	} else {
		level = v2alpha1.RuleLevelNamespace
		// scope the expression to the namespace so the rule cannot match
		// other namespaces' series
		expr, err := rules.InjectExprNamespaceLabel(rule.Query, namespace)
		if err != nil {
			return err
		}
		rule.Query = expr
	}
	ruleResourceLabels[rules.CustomRuleResourceLabelKeyLevel] = string(level)

	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return err
	}

	extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)})
	resourceRule, err := o.resourceRuleCache.GetRule(ruler, ruleNamespace, extraRuleResourceSelector, rule.Name)
	if err != nil {
		return err
	}
	if resourceRule == nil {
		return v2alpha1.ErrAlertingRuleNotFound
	}

	setRuleUpdateTime(rule, time.Now())

	// update in place within the resource/group the rule currently lives in
	respItems, err := ruler.UpdateAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector, ruleResourceLabels,
		&rules.ResourceRuleItem{ResourceName: resourceRule.ResourceName,
			RuleWithGroup: rules.RuleWithGroup{Group: resourceRule.Group, Rule: *parseToPrometheusRule(rule)}})
	if err != nil {
		return err
	}
	// surface the first per-item failure as the call's error
	for _, item := range respItems {
		if item.Status == v2alpha1.StatusError {
			if item.ErrorType == v2alpha1.ErrNotFound {
				return v2alpha1.ErrAlertingRuleNotFound
			}
			return item.Error
		}
	}
	return nil
}
|
||||
|
||||
// DeleteCustomAlertingRule deletes the custom alerting rule matching name (by
// id or name) at the level implied by namespace. Fails with
// ErrAlertingRuleNotFound when no persisted rule matches.
func (o *operator) DeleteCustomAlertingRule(ctx context.Context, namespace, name string) error {
	ruler, err := o.getThanosRuler()
	if err != nil {
		return err
	}
	if ruler == nil {
		return v2alpha1.ErrThanosRulerNotEnabled
	}

	var (
		level v2alpha1.RuleLevel
	)
	if namespace == "" {
		namespace = rulerNamespace
		level = v2alpha1.RuleLevelCluster
	} else {
		level = v2alpha1.RuleLevelNamespace
	}

	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return err
	}

	extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)})
	// the lookup may match several persisted copies; all of them are deleted
	resourceRules, err := o.resourceRuleCache.GetRuleByIdOrName(ruler, ruleNamespace, extraRuleResourceSelector, name)
	if err != nil {
		return err
	}
	if len(resourceRules) == 0 {
		return v2alpha1.ErrAlertingRuleNotFound
	}

	respItems, err := ruler.DeleteAlertingRules(ctx, ruleNamespace, resourceRules...)
	if err != nil {
		return err
	}
	// surface the first per-item failure as the call's error
	for _, item := range respItems {
		if item.Status == v2alpha1.StatusError {
			if item.ErrorType == v2alpha1.ErrNotFound {
				return v2alpha1.ErrAlertingRuleNotFound
			}
			return item.Error
		}
	}
	return nil
}
|
||||
|
||||
// CreateOrUpdateCustomAlertingRules creates or updates custom alerting rules
// in bulk: each posted rule is validated, then added if no persisted rule of
// that name exists, or updated otherwise; extra persisted copies sharing the
// name in other resources are deleted. Per-rule outcomes are reported in the
// returned BulkResponse rather than as a call-level error.
func (o *operator) CreateOrUpdateCustomAlertingRules(ctx context.Context, namespace string,
	rs []*v2alpha1.PostableAlertingRule) (*v2alpha1.BulkResponse, error) {

	if l := len(rs); l == 0 {
		return &v2alpha1.BulkResponse{}, nil
	}

	ruler, err := o.getThanosRuler()
	if err != nil {
		return nil, err
	}
	if ruler == nil {
		return nil, v2alpha1.ErrThanosRulerNotEnabled
	}

	var (
		level              v2alpha1.RuleLevel
		ruleResourceLabels = make(map[string]string)
	)
	// copy so the operator's base label set is never mutated
	for k, v := range o.thanosRuleResourceLabels {
		ruleResourceLabels[k] = v
	}
	if namespace == "" {
		namespace = rulerNamespace
		level = v2alpha1.RuleLevelCluster
	} else {
		level = v2alpha1.RuleLevelNamespace
	}
	ruleResourceLabels[rules.CustomRuleResourceLabelKeyLevel] = string(level)
	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return nil, err
	}

	extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)})

	resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace, extraRuleResourceSelector)
	if err != nil {
		return nil, err
	}
	// index every persisted rule by name (a name may map to several copies
	// spread over multiple resources)
	exists := make(map[string][]*rules.ResourceRuleItem)
	for _, c := range resourceRulesMap {
		for n, items := range c.NameRules {
			exists[n] = append(exists[n], items...)
		}
	}

	// check all the rules: reject duplicates within the request, invalid
	// rules, and (for namespace level) rules whose expression cannot be
	// namespace-scoped
	var (
		br       = &v2alpha1.BulkResponse{}
		nameSet  = make(map[string]struct{})
		invalids = make(map[string]struct{})
	)
	for i := range rs {
		var (
			r    = rs[i]
			name = r.Name
		)

		if _, ok := nameSet[name]; ok {
			br.Items = append(br.Items, &v2alpha1.BulkItemResponse{
				RuleName:  name,
				Status:    v2alpha1.StatusError,
				ErrorType: v2alpha1.ErrDuplicateName,
				Error:     errors.Errorf("There is more than one rule named %s in the bulk request", name),
			})
			invalids[name] = struct{}{}
			continue
		} else {
			nameSet[name] = struct{}{}
		}
		if err := r.Validate(); err != nil {
			br.Items = append(br.Items, &v2alpha1.BulkItemResponse{
				RuleName:  name,
				Status:    v2alpha1.StatusError,
				ErrorType: v2alpha1.ErrBadData,
				Error:     err,
			})
			invalids[name] = struct{}{}
			continue
		}
		if level == v2alpha1.RuleLevelNamespace {
			expr, err := rules.InjectExprNamespaceLabel(r.Query, namespace)
			if err != nil {
				br.Items = append(br.Items, v2alpha1.NewBulkItemErrorServerResponse(name, err))
				invalids[name] = struct{}{}
				continue
			}
			r.Query = expr
		}
	}
	// every rule was rejected: nothing left to apply
	if len(nameSet) == len(invalids) {
		return br.MakeBulkResponse(), nil
	}

	// Confirm whether the rules should be added or updated. For each rule that is committed,
	// it will be added if the rule does not exist, or updated otherwise.
	// If there are rules with the same name in the existing rules to update, the first will be
	// updated but the others will be deleted
	var (
		addRules []*rules.RuleWithGroup
		updRules []*rules.ResourceRuleItem
		delRules []*rules.ResourceRuleItem // duplicate rules that need to be deleted in other resources

		updateTime = time.Now()
	)
	for i := range rs {
		r := rs[i]
		if _, ok := invalids[r.Name]; ok {
			continue
		}
		setRuleUpdateTime(r, updateTime)
		if items, ok := exists[r.Name]; ok && len(items) > 0 {
			item := items[0]
			updRules = append(updRules, &rules.ResourceRuleItem{
				ResourceName:  item.ResourceName,
				RuleWithGroup: rules.RuleWithGroup{Group: item.Group, Rule: *parseToPrometheusRule(r)}})
			if len(items) > 1 {
				for j := 1; j < len(items); j++ {
					if items[j].ResourceName != item.ResourceName {
						delRules = append(delRules, items[j])
					}
				}
			}
		} else {
			addRules = append(addRules, &rules.RuleWithGroup{Rule: *parseToPrometheusRule(r)})
		}
	}

	// add rules; a batch-level error is fanned out to each affected rule
	if len(addRules) > 0 {
		resps, err := ruler.AddAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector, ruleResourceLabels, addRules...)
		if err == nil {
			br.Items = append(br.Items, resps...)
		} else {
			for _, rule := range addRules {
				br.Items = append(br.Items, v2alpha1.NewBulkItemErrorServerResponse(rule.Alert, err))
			}
		}
	}
	// update existing rules
	if len(updRules) > 0 {
		resps, err := ruler.UpdateAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector, ruleResourceLabels, updRules...)
		if err == nil {
			br.Items = append(br.Items, resps...)
		} else {
			for _, rule := range updRules {
				br.Items = append(br.Items, v2alpha1.NewBulkItemErrorServerResponse(rule.Alert, err))
			}
		}
	}
	// delete possible duplicate rules
	if len(delRules) > 0 {
		_, err := ruler.DeleteAlertingRules(ctx, ruleNamespace, delRules...)
		if err != nil {
			for _, rule := range delRules {
				br.Items = append(br.Items, v2alpha1.NewBulkItemErrorServerResponse(rule.Alert, err))
			}
		}
	}
	return br.MakeBulkResponse(), nil
}
|
||||
|
||||
// DeleteCustomAlertingRules deletes a batch of custom alerting rules by name.
// Names with no persisted rule are reported as ErrNotFound items in the
// returned BulkResponse; all persisted copies of a matching name are deleted.
func (o *operator) DeleteCustomAlertingRules(ctx context.Context, namespace string,
	names []string) (*v2alpha1.BulkResponse, error) {

	if l := len(names); l == 0 {
		return &v2alpha1.BulkResponse{}, nil
	}

	ruler, err := o.getThanosRuler()
	if err != nil {
		return nil, err
	}
	if ruler == nil {
		return nil, v2alpha1.ErrThanosRulerNotEnabled
	}

	var (
		level v2alpha1.RuleLevel
	)
	if namespace == "" {
		namespace = rulerNamespace
		level = v2alpha1.RuleLevelCluster
	} else {
		level = v2alpha1.RuleLevelNamespace
	}
	ruleNamespace, err := o.namespaceInformer.Lister().Get(namespace)
	if err != nil {
		return nil, err
	}

	extraRuleResourceSelector := labels.SelectorFromSet(labels.Set{rules.CustomRuleResourceLabelKeyLevel: string(level)})
	resourceRulesMap, err := o.resourceRuleCache.ListRules(ruler, ruleNamespace, extraRuleResourceSelector)
	if err != nil {
		return nil, err
	}
	// index every persisted rule by name (a name may map to several copies)
	exists := make(map[string][]*rules.ResourceRuleItem)
	for _, c := range resourceRulesMap {
		for n, items := range c.NameRules {
			exists[n] = append(exists[n], items...)
		}
	}

	br := &v2alpha1.BulkResponse{}
	var ruleItems []*rules.ResourceRuleItem
	for _, n := range names {
		if items, ok := exists[n]; ok {
			ruleItems = append(ruleItems, items...)
		} else {
			// unknown name: report, but keep deleting the rest
			br.Items = append(br.Items, &v2alpha1.BulkItemResponse{
				RuleName:  n,
				Status:    v2alpha1.StatusError,
				ErrorType: v2alpha1.ErrNotFound,
			})
		}
	}

	respItems, err := ruler.DeleteAlertingRules(ctx, ruleNamespace, ruleItems...)
	if err != nil {
		return nil, err
	}
	br.Items = append(br.Items, respItems...)

	return br.MakeBulkResponse(), nil
}
|
||||
|
||||
// getPrometheusRuler gets the cluster-in prometheus
|
||||
func (o *operator) getPrometheusRuler() (rules.Ruler, error) {
|
||||
prometheuses, err := o.prometheusInformer.Lister().Prometheuses(rulerNamespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error listing prometheuses")
|
||||
}
|
||||
if len(prometheuses) > 1 {
|
||||
// It is not supported to have multiple Prometheus instances in the monitoring namespace for now
|
||||
return nil, errors.Errorf(
|
||||
"there is more than one prometheus custom resource in %s", rulerNamespace)
|
||||
}
|
||||
if len(prometheuses) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return rules.NewPrometheusRuler(prometheuses[0], o.ruleResourceInformer, o.promResourceClient), nil
|
||||
}
|
||||
|
||||
func (o *operator) getThanosRuler() (rules.Ruler, error) {
|
||||
thanosrulers, err := o.thanosRulerInformer.Lister().ThanosRulers(rulerNamespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error listing thanosrulers: ")
|
||||
}
|
||||
if len(thanosrulers) > 1 {
|
||||
// It is not supported to have multiple thanosruler instances in the monitoring namespace for now
|
||||
return nil, errors.Errorf(
|
||||
"there is more than one thanosruler custom resource in %s", rulerNamespace)
|
||||
}
|
||||
if len(thanosrulers) == 0 {
|
||||
// if there is no thanos ruler, custom rules will not be supported
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return rules.NewThanosRuler(thanosrulers[0], o.ruleResourceInformer, o.promResourceClient), nil
|
||||
}
|
||||
|
||||
func parseToPrometheusRule(rule *v2alpha1.PostableAlertingRule) *promresourcesv1.Rule {
|
||||
if _, ok := rule.Labels[rules.LabelKeyAlertType]; !ok {
|
||||
rule.Labels[rules.LabelKeyAlertType] = rules.LabelValueAlertType
|
||||
}
|
||||
return &promresourcesv1.Rule{
|
||||
Alert: rule.Name,
|
||||
Expr: intstr.FromString(rule.Query),
|
||||
For: promresourcesv1.Duration(rule.Duration),
|
||||
Labels: rule.Labels,
|
||||
Annotations: rule.Annotations,
|
||||
}
|
||||
}
|
||||
|
||||
func pageAlertingRules(alertingRules []*v2alpha1.GettableAlertingRule,
|
||||
queryParams *v2alpha1.AlertingRuleQueryParams) *v2alpha1.GettableAlertingRuleList {
|
||||
|
||||
alertingRules = queryParams.Filter(alertingRules)
|
||||
queryParams.Sort(alertingRules)
|
||||
|
||||
return &v2alpha1.GettableAlertingRuleList{
|
||||
Total: len(alertingRules),
|
||||
Items: queryParams.Sub(alertingRules),
|
||||
}
|
||||
}
|
||||
|
||||
func pageAlerts(alertingRules []*v2alpha1.GettableAlertingRule,
|
||||
queryParams *v2alpha1.AlertQueryParams) *v2alpha1.AlertList {
|
||||
|
||||
var alerts []*v2alpha1.Alert
|
||||
for _, rule := range alertingRules {
|
||||
alerts = append(alerts, queryParams.Filter(rule.Alerts)...)
|
||||
}
|
||||
queryParams.Sort(alerts)
|
||||
|
||||
return &v2alpha1.AlertList{
|
||||
Total: len(alerts),
|
||||
Items: queryParams.Sub(alerts),
|
||||
}
|
||||
}
|
||||
|
||||
func setRuleUpdateTime(rule *v2alpha1.PostableAlertingRule, t time.Time) {
|
||||
if rule.Annotations == nil {
|
||||
rule.Annotations = make(map[string]string)
|
||||
}
|
||||
if t.IsZero() {
|
||||
t = time.Now()
|
||||
}
|
||||
rule.Annotations[v2alpha1.AnnotationKeyRuleUpdateTime] = t.UTC().Format(time.RFC3339)
|
||||
}
|
||||
@@ -1,949 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
promlabels "github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
promrules "github.com/prometheus/prometheus/rules"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
|
||||
alertingv2beta1 "kubesphere.io/api/alerting/v2beta1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
kapialertingv2beta1 "kubesphere.io/kubesphere/pkg/api/alerting/v2beta1"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
alertinglisters "kubesphere.io/kubesphere/pkg/client/listers/alerting/v2beta1"
|
||||
controller "kubesphere.io/kubesphere/pkg/controller/alerting"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
|
||||
)
|
||||
|
||||
// RuleGroupOperator exposes read operations over alerting rule groups at the
// three supported scopes — namespace, global and cluster — merging resource
// definitions with live evaluation status, plus listing of their active alerts.
type RuleGroupOperator interface {
	// ListRuleGroups lists namespace-scoped rule groups matching queryParam.
	ListRuleGroups(ctx context.Context, namespace string, queryParam *query.Query) (*api.ListResult, error)
	// GetRuleGroup returns one namespace-scoped rule group with its status.
	GetRuleGroup(ctx context.Context, namespace, name string) (*kapialertingv2beta1.RuleGroup, error)
	// ListAlerts lists alerts fired by rules in the given namespace.
	ListAlerts(ctx context.Context, namespace string, queryParam *query.Query) (*api.ListResult, error)

	// ListGlobalRuleGroups lists global rule groups matching queryParam.
	ListGlobalRuleGroups(ctx context.Context, queryParam *query.Query) (*api.ListResult, error)
	// GetGlobalRuleGroup returns one global rule group with its status.
	GetGlobalRuleGroup(ctx context.Context, name string) (*kapialertingv2beta1.GlobalRuleGroup, error)
	// ListGlobalAlerts lists alerts fired by global rules.
	ListGlobalAlerts(ctx context.Context, queryParam *query.Query) (*api.ListResult, error)

	// ListClusterRuleGroups lists cluster rule groups matching queryParam.
	ListClusterRuleGroups(ctx context.Context, queryParam *query.Query) (*api.ListResult, error)
	// GetClusterRuleGroup returns one cluster rule group with its status.
	GetClusterRuleGroup(ctx context.Context, name string) (*kapialertingv2beta1.ClusterRuleGroup, error)
	// ListClusterAlerts lists alerts fired by cluster rules.
	ListClusterAlerts(ctx context.Context, queryParam *query.Query) (*api.ListResult, error)
}
|
||||
|
||||
func NewRuleGroupOperator(informers informers.InformerFactory, ruleClient alerting.RuleClient) RuleGroupOperator {
|
||||
return &ruleGroupOperator{
|
||||
ruleClient: ruleClient,
|
||||
ruleGroupLister: informers.KubeSphereSharedInformerFactory().Alerting().V2beta1().RuleGroups().Lister(),
|
||||
clusterRuleGroupLister: informers.KubeSphereSharedInformerFactory().Alerting().V2beta1().ClusterRuleGroups().Lister(),
|
||||
globalRuleGroupLister: informers.KubeSphereSharedInformerFactory().Alerting().V2beta1().GlobalRuleGroups().Lister(),
|
||||
}
|
||||
}
|
||||
|
||||
// ruleGroupOperator implements RuleGroupOperator on top of informer listers
// (for rule-group resource definitions) and a rule client (for live
// evaluation status from the thanos ruler).
type ruleGroupOperator struct {
	// ruleClient queries the ruler for evaluated rule/alert status.
	ruleClient alerting.RuleClient

	// Listers for the three rule-group scopes, fed by shared informers.
	ruleGroupLister        alertinglisters.RuleGroupLister
	clusterRuleGroupLister alertinglisters.ClusterRuleGroupLister
	globalRuleGroupLister  alertinglisters.GlobalRuleGroupLister
}
|
||||
|
||||
// listRuleGroups lists the namespace-scoped RuleGroup resources matching
// selector and merges each with its live evaluation status fetched from the
// thanos ruler. Groups whose rules are not yet loaded by the ruler get a
// synthesized "unknown health" status. Results are returned as
// runtime.Object values (concretely *kapialertingv2beta1.RuleGroup) for use
// with the generic list helpers.
func (o *ruleGroupOperator) listRuleGroups(ctx context.Context, namespace string, selector labels.Selector) ([]runtime.Object, error) {
	resourceRuleGroups, err := o.ruleGroupLister.RuleGroups(namespace).List(selector)
	if err != nil {
		return nil, err
	}

	// get rules matching '{rule_level="namespace",namespace="<namespace>"}' from thanos ruler
	matchers := []*promlabels.Matcher{{
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyRuleLevel,
		Value: string(controller.RuleLevelNamesapce),
	}, {
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyNamespace,
		Value: namespace,
	}}
	statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
	if err != nil {
		return nil, err
	}
	// Index status groups by name; first occurrence of a name wins.
	var statusRuleGroupMap = make(map[string]*alerting.RuleGroup)
	for i := range statusRuleGroups {
		g := statusRuleGroups[i]
		// the matchers only filter rules and all groups still return,
		// and some of them may be with empty rules, so here check them and skip some.
		if len(g.Rules) == 0 {
			continue
		}
		if _, ok := statusRuleGroupMap[g.Name]; !ok {
			statusRuleGroupMap[g.Name] = g
		}
	}

	// copy status info of statusRuleGroups to matched rulegroups
	var groups = make([]runtime.Object, len(resourceRuleGroups))
	for i := range resourceRuleGroups {
		setRuleGroupResourceTypeMeta(resourceRuleGroups[i])
		g := &kapialertingv2beta1.RuleGroup{
			RuleGroup: *resourceRuleGroups[i],
			Status: kapialertingv2beta1.RuleGroupStatus{
				RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroups[i].Spec.Rules)),
				RulesStats:  kapialertingv2beta1.RulesStats{},
			},
		}
		statusg, ok := statusRuleGroupMap[g.Name]
		specRules := g.Spec.Rules
		if ok && len(statusg.Rules) > 0 {
			// Rule ids and disable flags are positional: index j in both
			// slices corresponds to spec rule j, matched by rule-id label.
			var ruleIds = make([]string, len(specRules))
			var ruleDisableFlags = make([]bool, len(specRules))
			for j := range specRules {
				if specRules[j].Labels != nil {
					ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
				}
				ruleDisableFlags[j] = specRules[j].Disable
			}

			copyRuleGroupStatus(statusg, &g.Status, ruleIds, ruleDisableFlags)
		} else {
			// for rules not loaded by rule reloader (eg.thanos) yet
			for j := range specRules {
				ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
				if specRules[j].Disable {
					ruleStatus.State = stateDisabledString
					g.Status.RulesStats.Disabled++
				} else {
					ruleStatus.State = stateInactiveString
					g.Status.RulesStats.Inactive++
				}
				g.Status.RulesStatus[j] = ruleStatus
			}
		}
		groups[i] = g
	}
	return groups, nil
}
|
||||
|
||||
// ListRuleGroups lists namespace-scoped rule groups (with live status) and
// applies the query's sorting, filtering and pagination. Status fields take
// precedence for sort/filter; everything else falls back to the default
// object-meta comparison/filtering.
func (o *ruleGroupOperator) ListRuleGroups(ctx context.Context, namespace string,
	queryParam *query.Query) (*api.ListResult, error) {

	groups, err := o.listRuleGroups(ctx, namespace, queryParam.Selector())
	if err != nil {
		return nil, err
	}

	listResult := resources.DefaultList(groups, queryParam, func(left, right runtime.Object, field query.Field) bool {
		hit, great := o.compareRuleGroupStatus(
			&(left.(*kapialertingv2beta1.RuleGroup).Status), &(right.(*kapialertingv2beta1.RuleGroup).Status), field)
		if hit {
			return great
		}
		return resources.DefaultObjectMetaCompare(
			left.(*kapialertingv2beta1.RuleGroup).ObjectMeta, right.(*kapialertingv2beta1.RuleGroup).ObjectMeta, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		hit, selected := o.filterRuleGroupStatus(&obj.(*kapialertingv2beta1.RuleGroup).Status, filter)
		if hit {
			return selected
		}
		return resources.DefaultObjectMetaFilter(obj.(*kapialertingv2beta1.RuleGroup).ObjectMeta, filter)
	})

	return listResult, nil
}
|
||||
|
||||
// compareRuleGroupStatus compare rulegroup status.
|
||||
// if field in status, return hit(true) and great(true if left great than right, else false).
|
||||
// if filed not in status, return hit(false) and great(false, should be unuseful).
|
||||
func (d *ruleGroupOperator) compareRuleGroupStatus(left, right *kapialertingv2beta1.RuleGroupStatus, field query.Field) (hit, great bool) {
|
||||
|
||||
switch field {
|
||||
case kapialertingv2beta1.FieldRuleGroupEvaluationTime:
|
||||
hit = true
|
||||
if left.EvaluationTime == nil {
|
||||
great = false
|
||||
} else if right.EvaluationTime == nil {
|
||||
great = true
|
||||
} else {
|
||||
great = *left.EvaluationTime > *right.EvaluationTime
|
||||
}
|
||||
case kapialertingv2beta1.FieldRuleGroupLastEvaluation:
|
||||
hit = true
|
||||
if left.LastEvaluation == nil {
|
||||
great = false
|
||||
} else if right.LastEvaluation == nil {
|
||||
great = true
|
||||
} else {
|
||||
great = left.LastEvaluation.After(*right.LastEvaluation)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// filterRuleGroupStatus filters by rulegroup status.
|
||||
// if field in status, return hit(true) and selected(true if match the filter, else false).
|
||||
// if filed not in status, return hit(false) and selected(false, should be unuseful).
|
||||
func (d *ruleGroupOperator) filterRuleGroupStatus(status *kapialertingv2beta1.RuleGroupStatus, filter query.Filter) (hit, selected bool) {
|
||||
|
||||
switch filter.Field {
|
||||
case kapialertingv2beta1.FieldState:
|
||||
hit = true
|
||||
switch string(filter.Value) {
|
||||
case stateDisabledString:
|
||||
selected = status.RulesStats.Disabled > 0
|
||||
case stateInactiveString:
|
||||
selected = status.RulesStats.Inactive > 0
|
||||
case statePendingString:
|
||||
selected = status.RulesStats.Pending > 0
|
||||
case stateFiringString:
|
||||
selected = status.RulesStats.Firing > 0
|
||||
case "":
|
||||
selected = true
|
||||
default:
|
||||
selected = false
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListAlerts collects the alerts of all rules in the given namespace's rule
// groups and applies the query's sorting, filtering and pagination. Alerts
// are temporarily wrapped as runtime.Object (wrapAlert) to reuse the generic
// list helpers, then unwrapped to *Alert before returning.
func (o *ruleGroupOperator) ListAlerts(ctx context.Context, namespace string,
	queryParam *query.Query) (*api.ListResult, error) {

	groups, err := o.listRuleGroups(ctx, namespace, labels.Everything())
	if err != nil {
		return nil, err
	}

	// encapsulated as runtime.Object for easy comparison and filtering.
	var alerts []runtime.Object
	for i := range groups {
		g := groups[i].(*kapialertingv2beta1.RuleGroup)
		for j := range g.Status.RulesStatus {
			ruleStatus := g.Status.RulesStatus[j]
			for k := range ruleStatus.Alerts {
				alerts = append(alerts, &wrapAlert{Alert: *ruleStatus.Alerts[k]})
			}
		}
	}

	filterAlert, err := o.createFilterAlertFunc(queryParam)
	if err != nil {
		return nil, err
	}
	listResult := resources.DefaultList(alerts, queryParam, func(left, right runtime.Object, field query.Field) bool {
		return o.compareAlert(&left.(*wrapAlert).Alert, &right.(*wrapAlert).Alert, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		return filterAlert(&obj.(*wrapAlert).Alert, filter)
	})
	// Unwrap so API consumers see plain alerts, not the wrapper type.
	for i := range listResult.Items {
		listResult.Items[i] = &listResult.Items[i].(*wrapAlert).Alert
	}
	return listResult, nil
}
|
||||
|
||||
func (d *ruleGroupOperator) compareAlert(left, right *kapialertingv2beta1.Alert, field query.Field) bool {
|
||||
switch field {
|
||||
case kapialertingv2beta1.FieldAlertActiveAt:
|
||||
if left.ActiveAt == nil {
|
||||
return false
|
||||
}
|
||||
if right.ActiveAt == nil {
|
||||
return true
|
||||
}
|
||||
return left.ActiveAt.After(*right.ActiveAt)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// createFilterAlertFunc builds the per-alert filter used by the list helpers.
// Label-filter and label-matcher expressions are parsed once, up front, from
// queryParam (so an invalid matcher fails fast with an error) and captured by
// the returned closure, which then evaluates individual filters against an
// alert's labels or state.
func (d *ruleGroupOperator) createFilterAlertFunc(queryParam *query.Query) (func(alert *kapialertingv2beta1.Alert, filter query.Filter) bool, error) {
	var labelFilters kapialertingv2beta1.LabelFilters
	var labelMatchers []*promlabels.Matcher
	var err error
	if len(queryParam.Filters) > 0 {
		if filters, ok := queryParam.Filters[kapialertingv2beta1.FieldAlertLabelFilters]; ok {
			labelFilters = kapialertingv2beta1.ParseLabelFilters(string(filters))
		}
		if matcherStr, ok := queryParam.Filters[kapialertingv2beta1.FieldAlertLabelMatcher]; ok {
			// PromQL metric-selector syntax, e.g. {severity="critical"}.
			labelMatchers, err = parser.ParseMetricSelector(string(matcherStr))
			if err != nil {
				return nil, fmt.Errorf("invalid %s param: %v", kapialertingv2beta1.FieldAlertLabelMatcher, err)
			}
		}
	}
	return func(alert *kapialertingv2beta1.Alert, filter query.Filter) bool {
		switch filter.Field {
		case kapialertingv2beta1.FieldAlertLabelFilters:
			if labelFilters == nil {
				return true
			}
			return labelFilters.Matches(alert.Labels)
		case kapialertingv2beta1.FieldAlertLabelMatcher:
			// Every matcher must match; a missing label matches as "".
			for _, m := range labelMatchers {
				var v string
				if len(alert.Labels) > 0 {
					v = alert.Labels[m.Name]
				}
				if !m.Matches(v) {
					return false
				}
			}
			return true
		case kapialertingv2beta1.FieldState:
			return alert.State == string(filter.Value)
		}
		// Unknown filter fields select nothing.
		return false
	}, nil
}
|
||||
|
||||
// GetRuleGroup returns the named namespace-scoped rule group merged with its
// live evaluation status from the thanos ruler. When the ruler has not yet
// loaded the group's rules, a synthesized "unknown health" status is filled
// in per rule.
func (o *ruleGroupOperator) GetRuleGroup(ctx context.Context, namespace, name string) (*kapialertingv2beta1.RuleGroup, error) {
	resourceRuleGroup, err := o.ruleGroupLister.RuleGroups(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	setRuleGroupResourceTypeMeta(resourceRuleGroup)

	ret := &kapialertingv2beta1.RuleGroup{
		RuleGroup: *resourceRuleGroup,
		Status: kapialertingv2beta1.RuleGroupStatus{
			RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroup.Spec.Rules)),
			RulesStats:  kapialertingv2beta1.RulesStats{},
		},
	}

	// Query the ruler only for namespace-level rules of this namespace.
	matchers := []*promlabels.Matcher{{
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyRuleLevel,
		Value: string(controller.RuleLevelNamesapce),
	}, {
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyNamespace,
		Value: namespace,
	}}
	statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
	if err != nil {
		return nil, err
	}

	var setStatus bool
	specRules := resourceRuleGroup.Spec.Rules
	for _, g := range statusRuleGroups {
		if g.Name == resourceRuleGroup.Name && len(g.Rules) > 0 {
			// Positional rule ids/disable flags for matching status rules
			// back to spec rules by rule-id label.
			var ruleIds = make([]string, len(specRules))
			var ruleDisableFlags = make([]bool, len(specRules))
			for j := range specRules {
				if specRules[j].Labels != nil {
					ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
				}
				ruleDisableFlags[j] = specRules[j].Disable
			}

			copyRuleGroupStatus(g, &ret.Status, ruleIds, ruleDisableFlags)
			setStatus = true
			break
		}
	}
	if !setStatus {
		// for rules not loaded by rule reloader (eg.thanos) yet
		for j := range ret.Spec.Rules {
			ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
			if specRules[j].Disable {
				ruleStatus.State = stateDisabledString
				ret.Status.RulesStats.Disabled++
			} else {
				ruleStatus.State = stateInactiveString
				ret.Status.RulesStats.Inactive++
			}
			ret.Status.RulesStatus[j] = ruleStatus
		}
	}

	return ret, nil
}
|
||||
|
||||
// listClusterRuleGroups lists the ClusterRuleGroup resources matching
// selector and merges each with its live evaluation status from the thanos
// ruler, mirroring listRuleGroups but at cluster level. Groups not yet
// loaded by the ruler get a synthesized "unknown health" status.
func (o *ruleGroupOperator) listClusterRuleGroups(ctx context.Context, selector labels.Selector) ([]runtime.Object, error) {
	resourceRuleGroups, err := o.clusterRuleGroupLister.List(selector)
	if err != nil {
		return nil, err
	}

	// get rules matching '{rule_level="cluster"}' from thanos ruler
	matchers := []*promlabels.Matcher{{
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyRuleLevel,
		Value: string(controller.RuleLevelCluster),
	}}
	statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
	if err != nil {
		return nil, err
	}
	// Index status groups by name; first occurrence of a name wins.
	var statusRuleGroupMap = make(map[string]*alerting.RuleGroup)
	for i := range statusRuleGroups {
		g := statusRuleGroups[i]
		// the matchers only filter rules and all groups still return,
		// and some of them may be with empty rules, so here check them and skip some.
		if len(g.Rules) == 0 {
			continue
		}
		if _, ok := statusRuleGroupMap[g.Name]; !ok {
			statusRuleGroupMap[g.Name] = g
		}
	}
	// copy status info of statusRuleGroups to matched rulegroups
	var groups = make([]runtime.Object, len(resourceRuleGroups))
	for i := range resourceRuleGroups {
		setRuleGroupResourceTypeMeta(resourceRuleGroups[i])
		g := &kapialertingv2beta1.ClusterRuleGroup{
			ClusterRuleGroup: *resourceRuleGroups[i],
			Status: kapialertingv2beta1.RuleGroupStatus{
				RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroups[i].Spec.Rules)),
				RulesStats:  kapialertingv2beta1.RulesStats{},
			},
		}
		statusg, ok := statusRuleGroupMap[g.Name]
		specRules := g.Spec.Rules
		if ok && len(statusg.Rules) > 0 {
			var ruleIds = make([]string, len(specRules))
			var ruleDisableFlags = make([]bool, len(specRules))
			for j := range specRules {
				if specRules[j].Labels != nil {
					ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
				}
				ruleDisableFlags[j] = specRules[j].Disable
			}

			copyRuleGroupStatus(statusg, &g.Status, ruleIds, ruleDisableFlags)
		} else {
			// for rules not loaded by rule reloader (eg.thanos) yet
			for j := range g.Spec.Rules {
				ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
				if specRules[j].Disable {
					ruleStatus.State = stateDisabledString
					g.Status.RulesStats.Disabled++
				} else {
					ruleStatus.State = stateInactiveString
					g.Status.RulesStats.Inactive++
				}
				g.Status.RulesStatus[j] = ruleStatus
			}
		}
		groups[i] = g
	}
	return groups, nil
}
|
||||
|
||||
// ListClusterRuleGroups lists cluster rule groups (with live status) and
// applies the query's sorting, filtering and pagination. Status fields take
// precedence for sort/filter; everything else falls back to the default
// object-meta comparison/filtering.
func (o *ruleGroupOperator) ListClusterRuleGroups(ctx context.Context,
	queryParam *query.Query) (*api.ListResult, error) {

	groups, err := o.listClusterRuleGroups(ctx, queryParam.Selector())
	if err != nil {
		return nil, err
	}

	listResult := resources.DefaultList(groups, queryParam, func(left, right runtime.Object, field query.Field) bool {
		hit, great := o.compareRuleGroupStatus(
			&(left.(*kapialertingv2beta1.ClusterRuleGroup).Status), &(right.(*kapialertingv2beta1.ClusterRuleGroup).Status), field)
		if hit {
			return great
		}
		return resources.DefaultObjectMetaCompare(
			left.(*kapialertingv2beta1.ClusterRuleGroup).ObjectMeta, right.(*kapialertingv2beta1.ClusterRuleGroup).ObjectMeta, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		hit, selected := o.filterRuleGroupStatus(&obj.(*kapialertingv2beta1.ClusterRuleGroup).Status, filter)
		if hit {
			return selected
		}
		return resources.DefaultObjectMetaFilter(obj.(*kapialertingv2beta1.ClusterRuleGroup).ObjectMeta, filter)
	})

	return listResult, nil
}
|
||||
|
||||
// ListClusterAlerts collects the alerts of all cluster rule groups and
// applies the query's sorting, filtering and pagination. Alerts are
// temporarily wrapped as runtime.Object (wrapAlert) to reuse the generic
// list helpers, then unwrapped to *Alert before returning.
func (o *ruleGroupOperator) ListClusterAlerts(ctx context.Context,
	queryParam *query.Query) (*api.ListResult, error) {

	groups, err := o.listClusterRuleGroups(ctx, labels.Everything())
	if err != nil {
		return nil, err
	}

	// encapsulated as runtime.Object for easy comparison and filtering.
	var alerts []runtime.Object
	for i := range groups {
		g := groups[i].(*kapialertingv2beta1.ClusterRuleGroup)
		for j := range g.Status.RulesStatus {
			ruleStatus := g.Status.RulesStatus[j]
			for k := range ruleStatus.Alerts {
				alerts = append(alerts, &wrapAlert{Alert: *ruleStatus.Alerts[k]})
			}
		}
	}

	filterAlert, err := o.createFilterAlertFunc(queryParam)
	if err != nil {
		return nil, err
	}
	listResult := resources.DefaultList(alerts, queryParam, func(left, right runtime.Object, field query.Field) bool {
		return o.compareAlert(&left.(*wrapAlert).Alert, &right.(*wrapAlert).Alert, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		return filterAlert(&obj.(*wrapAlert).Alert, filter)
	})
	// Unwrap so API consumers see plain alerts, not the wrapper type.
	for i := range listResult.Items {
		listResult.Items[i] = &listResult.Items[i].(*wrapAlert).Alert
	}
	return listResult, nil
}
|
||||
|
||||
// GetClusterRuleGroup returns the named cluster rule group merged with its
// live evaluation status from the thanos ruler. When the ruler has not yet
// loaded the group's rules, a synthesized "unknown health" status is filled
// in per rule.
func (o *ruleGroupOperator) GetClusterRuleGroup(ctx context.Context, name string) (*kapialertingv2beta1.ClusterRuleGroup, error) {
	resourceRuleGroup, err := o.clusterRuleGroupLister.Get(name)
	if err != nil {
		return nil, err
	}

	setRuleGroupResourceTypeMeta(resourceRuleGroup)

	ret := &kapialertingv2beta1.ClusterRuleGroup{
		ClusterRuleGroup: *resourceRuleGroup,
		Status: kapialertingv2beta1.RuleGroupStatus{
			RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroup.Spec.Rules)),
			RulesStats:  kapialertingv2beta1.RulesStats{},
		},
	}

	// Query the ruler only for cluster-level rules.
	matchers := []*promlabels.Matcher{{
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyRuleLevel,
		Value: string(controller.RuleLevelCluster),
	}}
	statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
	if err != nil {
		return nil, err
	}

	var setStatus bool
	specRules := resourceRuleGroup.Spec.Rules
	for _, g := range statusRuleGroups {
		if g.Name == resourceRuleGroup.Name && len(g.Rules) > 0 {
			// Positional rule ids/disable flags for matching status rules
			// back to spec rules by rule-id label.
			var ruleIds = make([]string, len(specRules))
			var ruleDisableFlags = make([]bool, len(specRules))
			for j := range specRules {
				if specRules[j].Labels != nil {
					ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
				}
				ruleDisableFlags[j] = specRules[j].Disable
			}

			copyRuleGroupStatus(g, &ret.Status, ruleIds, ruleDisableFlags)
			setStatus = true
			break
		}
	}
	if !setStatus {
		// for rules not loaded by rule reloader (eg.thanos) yet
		for j := range ret.Spec.Rules {
			ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
			if specRules[j].Disable {
				ruleStatus.State = stateDisabledString
				ret.Status.RulesStats.Disabled++
			} else {
				ruleStatus.State = stateInactiveString
				ret.Status.RulesStats.Inactive++
			}
			ret.Status.RulesStatus[j] = ruleStatus
		}
	}

	return ret, nil
}
|
||||
|
||||
// listGlobalRuleGroups lists the GlobalRuleGroup resources matching selector
// and merges each with its live evaluation status from the thanos ruler.
// Unlike the namespace/cluster variants, global rules carry enforced label
// matchers: for any rule whose status has no expression (disabled rules, or
// rules not yet loaded by the ruler), the effective expression is recomputed
// here by applying the rule's enforce matchers to its spec expression.
func (o *ruleGroupOperator) listGlobalRuleGroups(ctx context.Context, selector labels.Selector) ([]runtime.Object, error) {
	resourceRuleGroups, err := o.globalRuleGroupLister.List(selector)
	if err != nil {
		return nil, err
	}

	// get rules matching '{rule_level="global"}' from thanos ruler
	matchers := []*promlabels.Matcher{{
		Type:  promlabels.MatchEqual,
		Name:  controller.RuleLabelKeyRuleLevel,
		Value: string(controller.RuleLevelGlobal),
	}}
	statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
	if err != nil {
		return nil, err
	}
	// Index status groups by name; first occurrence of a name wins.
	var statusRuleGroupMap = make(map[string]*alerting.RuleGroup)
	for i := range statusRuleGroups {
		g := statusRuleGroups[i]
		// the matchers only filter rules and all groups still return,
		// and some of them may be with empty rules, so here check them and skip some.
		if len(g.Rules) == 0 {
			continue
		}
		if _, ok := statusRuleGroupMap[g.Name]; !ok {
			statusRuleGroupMap[g.Name] = g
		}
	}
	// copy status info of statusRuleGroups to matched rulegroups
	var groups = make([]runtime.Object, len(resourceRuleGroups))
	for i := range resourceRuleGroups {
		setRuleGroupResourceTypeMeta(resourceRuleGroups[i])
		g := &kapialertingv2beta1.GlobalRuleGroup{
			GlobalRuleGroup: *resourceRuleGroups[i],
			Status: kapialertingv2beta1.RuleGroupStatus{
				RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroups[i].Spec.Rules)),
				RulesStats:  kapialertingv2beta1.RulesStats{},
			},
		}
		statusg, ok := statusRuleGroupMap[g.Name]
		specRules := g.Spec.Rules
		if ok && len(statusg.Rules) > 0 {
			var ruleIds = make([]string, len(specRules))
			var ruleDisableFlags = make([]bool, len(specRules))
			for j := range specRules {
				if specRules[j].Labels != nil {
					ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
				}
				ruleDisableFlags[j] = specRules[j].Disable
			}

			copyRuleGroupStatus(statusg, &g.Status, ruleIds, ruleDisableFlags)
			for j := range g.Status.RulesStatus {
				// for rules disabled and etc.
				if g.Status.RulesStatus[j].Expr == "" {
					rule := g.Spec.Rules[j]
					enforceExprFunc := controller.CreateEnforceExprFunc(controller.ParseGlobalRuleEnforceMatchers(&rule))
					expr, err := enforceExprFunc(rule.Expr.String())
					if err != nil {
						return nil, err
					}
					g.Status.RulesStatus[j].Expr = expr
				}
			}
		} else {
			// for rules not loaded by rule reloader (eg.thanos) yet
			for j, rule := range specRules {
				ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
				if specRules[j].Disable {
					ruleStatus.State = stateDisabledString
					g.Status.RulesStats.Disabled++
				} else {
					ruleStatus.State = stateInactiveString
					g.Status.RulesStats.Inactive++
				}
				enforceExprFunc := controller.CreateEnforceExprFunc(controller.ParseGlobalRuleEnforceMatchers(&rule))
				expr, err := enforceExprFunc(rule.Expr.String())
				if err != nil {
					return nil, err
				}
				ruleStatus.Expr = expr
				g.Status.RulesStatus[j] = ruleStatus
			}
		}
		groups[i] = g
	}
	return groups, nil
}
|
||||
|
||||
// ListGlobalRuleGroups lists global rule groups (with live status) and
// applies the query's sorting, filtering and pagination. The builtin/custom
// distinction is translated into a label-selector requirement up front, so
// the corresponding filter is a no-op during the generic filtering pass.
func (o *ruleGroupOperator) ListGlobalRuleGroups(ctx context.Context,
	queryParam *query.Query) (*api.ListResult, error) {

	selector := queryParam.Selector()
	if val, ok := queryParam.Filters[kapialertingv2beta1.FieldBuiltin]; ok {
		// add match requirement to the selector to select only builtin or custom rulegroups
		var operator selection.Operator
		if val == controller.PrometheusRuleResourceLabelValueBuiltinTrue {
			operator = selection.Equals
		} else {
			operator = selection.NotEquals
		}
		requirement, _ := labels.NewRequirement(
			controller.PrometheusRuleResourceLabelKeyBuiltin,
			operator,
			[]string{controller.PrometheusRuleResourceLabelValueBuiltinTrue})
		selector = selector.Add(*requirement)
	}
	groups, err := o.listGlobalRuleGroups(ctx, selector)
	if err != nil {
		return nil, err
	}

	listResult := resources.DefaultList(groups, queryParam, func(left, right runtime.Object, field query.Field) bool {
		hit, great := o.compareRuleGroupStatus(
			&(left.(*kapialertingv2beta1.GlobalRuleGroup).Status), &(right.(*kapialertingv2beta1.GlobalRuleGroup).Status), field)
		if hit {
			return great
		}
		return resources.DefaultObjectMetaCompare(
			left.(*kapialertingv2beta1.GlobalRuleGroup).ObjectMeta, right.(*kapialertingv2beta1.GlobalRuleGroup).ObjectMeta, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		if filter.Field == kapialertingv2beta1.FieldBuiltin { // ignoring this filter because it is filtered at the front
			return true
		}
		hit, selected := o.filterRuleGroupStatus(&obj.(*kapialertingv2beta1.GlobalRuleGroup).Status, filter)
		if hit {
			return selected
		}
		return resources.DefaultObjectMetaFilter(obj.(*kapialertingv2beta1.GlobalRuleGroup).ObjectMeta, filter)
	})

	return listResult, nil
}
|
||||
|
||||
// ListGlobalAlerts collects the alerts of all global rule groups and applies
// the query's sorting, filtering and pagination. The builtin/custom filter
// is translated into a label-selector requirement up front (so it is skipped
// during per-alert filtering); alerts are temporarily wrapped as
// runtime.Object (wrapAlert) to reuse the generic list helpers, then
// unwrapped to *Alert before returning.
func (o *ruleGroupOperator) ListGlobalAlerts(ctx context.Context,
	queryParam *query.Query) (*api.ListResult, error) {

	selector := labels.Everything()
	if val, ok := queryParam.Filters[kapialertingv2beta1.FieldBuiltin]; ok {
		// add match requirement to the selector to select only builtin or custom rulegroups
		var operator selection.Operator
		if val == controller.PrometheusRuleResourceLabelValueBuiltinTrue {
			operator = selection.Equals
		} else {
			operator = selection.NotEquals
		}
		requirement, _ := labels.NewRequirement(
			controller.PrometheusRuleResourceLabelKeyBuiltin,
			operator,
			[]string{controller.PrometheusRuleResourceLabelValueBuiltinTrue})
		selector = selector.Add(*requirement)
	}
	groups, err := o.listGlobalRuleGroups(ctx, selector)
	if err != nil {
		return nil, err
	}

	// encapsulated as runtime.Object for easy comparison and filtering.
	var alerts []runtime.Object
	for i := range groups {
		wrapg := groups[i].(*kapialertingv2beta1.GlobalRuleGroup)
		for j := range wrapg.Status.RulesStatus {
			ruleStatus := wrapg.Status.RulesStatus[j]
			for k := range ruleStatus.Alerts {
				alerts = append(alerts, &wrapAlert{Alert: *ruleStatus.Alerts[k]})
			}
		}
	}

	filterAlert, err := o.createFilterAlertFunc(queryParam)
	if err != nil {
		return nil, err
	}
	listResult := resources.DefaultList(alerts, queryParam, func(left, right runtime.Object, field query.Field) bool {
		return o.compareAlert(&left.(*wrapAlert).Alert, &right.(*wrapAlert).Alert, field)
	}, func(obj runtime.Object, filter query.Filter) bool {
		if filter.Field == kapialertingv2beta1.FieldBuiltin { // ignoring this filter because it is filtered at the front
			return true
		}
		return filterAlert(&obj.(*wrapAlert).Alert, filter)
	})
	// Unwrap so API consumers see plain alerts, not the wrapper type.
	for i := range listResult.Items {
		listResult.Items[i] = &listResult.Items[i].(*wrapAlert).Alert
	}
	return listResult, nil
}
|
||||
|
||||
func (o *ruleGroupOperator) GetGlobalRuleGroup(ctx context.Context, name string) (*kapialertingv2beta1.GlobalRuleGroup, error) {
|
||||
resourceRuleGroup, err := o.globalRuleGroupLister.Get(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setRuleGroupResourceTypeMeta(resourceRuleGroup)
|
||||
|
||||
ret := &kapialertingv2beta1.GlobalRuleGroup{
|
||||
GlobalRuleGroup: *resourceRuleGroup,
|
||||
Status: kapialertingv2beta1.RuleGroupStatus{
|
||||
RulesStatus: make([]kapialertingv2beta1.RuleStatus, len(resourceRuleGroup.Spec.Rules)),
|
||||
RulesStats: kapialertingv2beta1.RulesStats{},
|
||||
},
|
||||
}
|
||||
|
||||
matchers := []*promlabels.Matcher{{
|
||||
Type: promlabels.MatchEqual,
|
||||
Name: controller.RuleLabelKeyRuleLevel,
|
||||
Value: string(controller.RuleLevelGlobal),
|
||||
}}
|
||||
statusRuleGroups, err := o.ruleClient.ThanosRules(ctx, matchers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var setStatus bool
|
||||
specRules := resourceRuleGroup.Spec.Rules
|
||||
for _, g := range statusRuleGroups {
|
||||
if g.Name == resourceRuleGroup.Name && len(g.Rules) > 0 {
|
||||
var ruleIds = make([]string, len(specRules))
|
||||
var ruleDisableFlags = make([]bool, len(specRules))
|
||||
for j := range specRules {
|
||||
if specRules[j].Labels != nil {
|
||||
ruleIds[j] = specRules[j].Labels[alertingv2beta1.RuleLabelKeyRuleId]
|
||||
}
|
||||
ruleDisableFlags[j] = specRules[j].Disable
|
||||
}
|
||||
|
||||
copyRuleGroupStatus(g, &ret.Status, ruleIds, ruleDisableFlags)
|
||||
for j := range ret.Status.RulesStatus {
|
||||
// for rules disabled and etc.
|
||||
if ret.Status.RulesStatus[j].Expr == "" {
|
||||
rule := ret.Spec.Rules[j]
|
||||
enforceExprFunc := controller.CreateEnforceExprFunc(controller.ParseGlobalRuleEnforceMatchers(&rule))
|
||||
expr, err := enforceExprFunc(rule.Expr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret.Status.RulesStatus[j].Expr = expr
|
||||
}
|
||||
}
|
||||
setStatus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !setStatus {
|
||||
// for rules not loaded by rule reloader (eg.thanos) yet
|
||||
for j, rule := range ret.Spec.Rules {
|
||||
ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
|
||||
if specRules[j].Disable {
|
||||
ruleStatus.State = stateDisabledString
|
||||
ret.Status.RulesStats.Disabled++
|
||||
} else {
|
||||
ruleStatus.State = stateInactiveString
|
||||
ret.Status.RulesStats.Inactive++
|
||||
}
|
||||
ret.Status.RulesStatus = append(ret.Status.RulesStatus, ruleStatus)
|
||||
enforceExprFunc := controller.CreateEnforceExprFunc(controller.ParseGlobalRuleEnforceMatchers(&rule))
|
||||
expr, err := enforceExprFunc(rule.Expr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ruleStatus.Expr = expr
|
||||
ret.Status.RulesStatus[j] = ruleStatus
|
||||
}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// copyRuleGroupStatus copies group/rule status and alerts from source to target
|
||||
func copyRuleGroupStatus(source *alerting.RuleGroup, target *kapialertingv2beta1.RuleGroupStatus, ruleIds []string, ruleDisableFlags []bool) {
|
||||
target.LastEvaluation = source.LastEvaluation
|
||||
if source.EvaluationTime > 0 {
|
||||
target.EvaluationTime = &source.EvaluationTime
|
||||
}
|
||||
|
||||
sourceRuleMap := make(map[string]*alerting.AlertingRule, len(source.Rules))
|
||||
for i := range source.Rules {
|
||||
rule := source.Rules[i]
|
||||
if len(rule.Labels) > 0 {
|
||||
if ruleId, ok := rule.Labels[alertingv2beta1.RuleLabelKeyRuleId]; ok {
|
||||
sourceRuleMap[ruleId] = rule
|
||||
}
|
||||
}
|
||||
}
|
||||
for i, ruleId := range ruleIds {
|
||||
rule, ok := sourceRuleMap[ruleId]
|
||||
if !ok {
|
||||
ruleStatus := kapialertingv2beta1.RuleStatus{Health: string(promrules.HealthUnknown)}
|
||||
if ruleDisableFlags[i] {
|
||||
ruleStatus.State = stateDisabledString
|
||||
target.RulesStats.Disabled++
|
||||
} else {
|
||||
// rules are being loaded
|
||||
ruleStatus.State = stateInactiveString
|
||||
target.RulesStats.Inactive++
|
||||
}
|
||||
target.RulesStatus[i] = ruleStatus
|
||||
continue
|
||||
}
|
||||
|
||||
switch rule.State {
|
||||
case statePendingString:
|
||||
target.RulesStats.Pending++
|
||||
case stateFiringString:
|
||||
target.RulesStats.Firing++
|
||||
case stateInactiveString:
|
||||
target.RulesStats.Inactive++
|
||||
}
|
||||
|
||||
var ruleActiveAt *time.Time
|
||||
alerts := []*kapialertingv2beta1.Alert{}
|
||||
for _, alert := range rule.Alerts {
|
||||
alerts = append(alerts, &kapialertingv2beta1.Alert{
|
||||
ActiveAt: alert.ActiveAt,
|
||||
Annotations: alert.Annotations,
|
||||
Labels: alert.Labels,
|
||||
State: alert.State,
|
||||
Value: alert.Value,
|
||||
})
|
||||
if alert.ActiveAt != nil && (ruleActiveAt == nil || alert.ActiveAt.Before(*ruleActiveAt)) {
|
||||
ruleActiveAt = alert.ActiveAt
|
||||
}
|
||||
}
|
||||
ruleStatus := kapialertingv2beta1.RuleStatus{
|
||||
State: rule.State,
|
||||
Health: rule.Health,
|
||||
LastError: rule.LastError,
|
||||
EvaluationTime: rule.EvaluationTime,
|
||||
LastEvaluation: rule.LastEvaluation,
|
||||
ActiveAt: ruleActiveAt,
|
||||
Alerts: alerts,
|
||||
}
|
||||
if len(rule.Labels) > 0 {
|
||||
if level, ok := rule.Labels[controller.RuleLabelKeyRuleLevel]; ok &&
|
||||
level == string(controller.RuleLevelGlobal) { // provided only for global rules
|
||||
ruleStatus.Expr = rule.Query
|
||||
}
|
||||
}
|
||||
target.RulesStatus[i] = ruleStatus
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
statePendingString = promrules.StatePending.String()
|
||||
stateFiringString = promrules.StateFiring.String()
|
||||
stateInactiveString = promrules.StateInactive.String()
|
||||
stateDisabledString = "disabled"
|
||||
)
|
||||
|
||||
type wrapAlert struct {
|
||||
kapialertingv2beta1.Alert
|
||||
runtime.Object
|
||||
}
|
||||
|
||||
var (
|
||||
apiVersion = alertingv2beta1.SchemeGroupVersion.String()
|
||||
)
|
||||
|
||||
// set TypeMeta info because the objects getted by lister.List() and lister.Get() may have no TypeMeta
|
||||
// related issue: https://github.com/kubernetes/client-go/issues/541
|
||||
func setRuleGroupResourceTypeMeta(obj runtime.Object) {
|
||||
switch o := obj.(type) {
|
||||
case *alertingv2beta1.RuleGroup:
|
||||
o.APIVersion = apiVersion
|
||||
o.Kind = alertingv2beta1.ResourceKindRuleGroup
|
||||
case *alertingv2beta1.ClusterRuleGroup:
|
||||
o.APIVersion = apiVersion
|
||||
o.Kind = alertingv2beta1.ResourceKindClusterRuleGroup
|
||||
case *alertingv2beta1.GlobalRuleGroup:
|
||||
o.APIVersion = apiVersion
|
||||
o.Kind = alertingv2beta1.ResourceKindGlobalRuleGroup
|
||||
}
|
||||
}
|
||||
@@ -1,277 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package rules
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
)
|
||||
|
||||
// RuleCache caches all rules from the prometheusrule custom resources
|
||||
type RuleCache struct {
|
||||
lock sync.RWMutex
|
||||
namespaces map[string]*namespaceRuleCache
|
||||
}
|
||||
|
||||
func NewRuleCache(ruleResourceInformer prominformersv1.PrometheusRuleInformer) *RuleCache {
|
||||
rc := RuleCache{
|
||||
namespaces: make(map[string]*namespaceRuleCache),
|
||||
}
|
||||
|
||||
ruleResourceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: rc.addCache,
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
rc.addCache(newObj)
|
||||
},
|
||||
DeleteFunc: rc.deleteCache,
|
||||
})
|
||||
return &rc
|
||||
}
|
||||
|
||||
func (c *RuleCache) addCache(referObj interface{}) {
|
||||
pr, ok := referObj.(*promresourcesv1.PrometheusRule)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
cr := parseRuleResource(pr)
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
cn, ok := c.namespaces[pr.Namespace]
|
||||
if !ok || cn == nil {
|
||||
cn = &namespaceRuleCache{
|
||||
namespace: pr.Namespace,
|
||||
resources: make(map[string]*resourceRuleCache),
|
||||
}
|
||||
c.namespaces[pr.Namespace] = cn
|
||||
}
|
||||
cn.resources[pr.Name] = cr
|
||||
}
|
||||
|
||||
func (c *RuleCache) deleteCache(referObj interface{}) {
|
||||
pr, ok := referObj.(*promresourcesv1.PrometheusRule)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
cn, ok := c.namespaces[pr.Namespace]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(cn.resources, pr.Name)
|
||||
if len(cn.resources) == 0 {
|
||||
delete(c.namespaces, pr.Namespace)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *RuleCache) getResourceRuleCaches(ruler Ruler, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector) (map[string]*resourceRuleCache, error) {
|
||||
|
||||
selected, err := ruleNamespaceSelected(ruler, ruleNamespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !selected {
|
||||
return nil, nil
|
||||
}
|
||||
rSelector, err := ruler.RuleResourceSelector(extraRuleResourceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var m = make(map[string]*resourceRuleCache)
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
cn, ok := c.namespaces[ruleNamespace.Name]
|
||||
if ok && cn != nil {
|
||||
for _, cr := range cn.resources {
|
||||
if rSelector.Matches(labels.Set(cr.Labels)) {
|
||||
m[cr.Name] = cr
|
||||
}
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *RuleCache) GetRule(ruler Ruler, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector, idOrName string) (*ResourceRuleItem, error) {
|
||||
|
||||
rules, err := c.GetRuleByIdOrName(ruler, ruleNamespace, extraRuleResourceSelector, idOrName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if l := len(rules); l == 0 {
|
||||
return nil, nil
|
||||
} else if l > 1 {
|
||||
// guarantees the stability of the get operations.
|
||||
sort.Slice(rules, func(i, j int) bool {
|
||||
return v2alpha1.AlertingRuleIdCompare(rules[i].Id, rules[j].Id)
|
||||
})
|
||||
}
|
||||
return rules[0], nil
|
||||
}
|
||||
|
||||
func (c *RuleCache) GetRuleByIdOrName(ruler Ruler, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector, idOrName string) ([]*ResourceRuleItem, error) {
|
||||
|
||||
caches, err := c.getResourceRuleCaches(ruler, ruleNamespace, extraRuleResourceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(caches) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var rules []*ResourceRuleItem
|
||||
switch ruler.(type) {
|
||||
case *PrometheusRuler:
|
||||
for rn, rc := range caches {
|
||||
if rule, ok := rc.IdRules[idOrName]; ok {
|
||||
rules = append(rules, &ResourceRuleItem{
|
||||
RuleWithGroup: RuleWithGroup{
|
||||
Group: rule.Group,
|
||||
Id: rule.Id,
|
||||
Rule: *rule.Rule.DeepCopy(),
|
||||
},
|
||||
ResourceName: rn,
|
||||
})
|
||||
}
|
||||
}
|
||||
case *ThanosRuler:
|
||||
for rn, rc := range caches {
|
||||
if nrules, ok := rc.NameRules[idOrName]; ok {
|
||||
for _, nrule := range nrules {
|
||||
rules = append(rules, &ResourceRuleItem{
|
||||
RuleWithGroup: RuleWithGroup{
|
||||
Group: nrule.Group,
|
||||
Id: nrule.Id,
|
||||
Rule: *nrule.Rule.DeepCopy(),
|
||||
},
|
||||
ResourceName: rn,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("unsupported ruler type")
|
||||
}
|
||||
|
||||
return rules, err
|
||||
}
|
||||
|
||||
func (c *RuleCache) ListRules(ruler Ruler, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector) (map[string]*ResourceRuleCollection, error) {
|
||||
|
||||
caches, err := c.getResourceRuleCaches(ruler, ruleNamespace, extraRuleResourceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(caches) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ret := make(map[string]*ResourceRuleCollection)
|
||||
for rn, rc := range caches {
|
||||
rrs := &ResourceRuleCollection{
|
||||
GroupSet: make(map[string]struct{}),
|
||||
IdRules: make(map[string]*ResourceRuleItem),
|
||||
NameRules: make(map[string][]*ResourceRuleItem),
|
||||
}
|
||||
for name, rules := range rc.NameRules {
|
||||
for _, rule := range rules {
|
||||
rrs.GroupSet[rule.Group] = struct{}{}
|
||||
rr := &ResourceRuleItem{
|
||||
RuleWithGroup: RuleWithGroup{
|
||||
Group: rule.Group,
|
||||
Id: rule.Id,
|
||||
Rule: *rule.Rule.DeepCopy(),
|
||||
},
|
||||
ResourceName: rn,
|
||||
}
|
||||
rrs.IdRules[rr.Id] = rr
|
||||
rrs.NameRules[name] = append(rrs.NameRules[name], rr)
|
||||
}
|
||||
}
|
||||
if len(rrs.IdRules) > 0 {
|
||||
ret[rn] = rrs
|
||||
}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
type namespaceRuleCache struct {
|
||||
namespace string
|
||||
resources map[string]*resourceRuleCache
|
||||
}
|
||||
|
||||
type resourceRuleCache struct {
|
||||
Name string
|
||||
Labels map[string]string
|
||||
GroupSet map[string]struct{}
|
||||
IdRules map[string]*cacheRule
|
||||
NameRules map[string][]*cacheRule
|
||||
}
|
||||
|
||||
type cacheRule struct {
|
||||
Group string
|
||||
Id string
|
||||
Rule *promresourcesv1.Rule
|
||||
}
|
||||
|
||||
func parseRuleResource(pr *promresourcesv1.PrometheusRule) *resourceRuleCache {
|
||||
var (
|
||||
groupSet = make(map[string]struct{})
|
||||
idRules = make(map[string]*cacheRule)
|
||||
nameRules = make(map[string][]*cacheRule)
|
||||
)
|
||||
for i := 0; i < len(pr.Spec.Groups); i++ {
|
||||
g := pr.Spec.Groups[i]
|
||||
for j := 0; j < len(g.Rules); j++ {
|
||||
gr := g.Rules[j]
|
||||
if gr.Alert == "" {
|
||||
continue
|
||||
}
|
||||
groupSet[g.Name] = struct{}{}
|
||||
cr := &cacheRule{
|
||||
Group: g.Name,
|
||||
Id: GenResourceRuleIdIgnoreFormat(g.Name, &gr),
|
||||
Rule: &gr,
|
||||
}
|
||||
nameRules[cr.Rule.Alert] = append(nameRules[cr.Rule.Alert], cr)
|
||||
idRules[cr.Id] = cr
|
||||
}
|
||||
}
|
||||
return &resourceRuleCache{
|
||||
Name: pr.Name,
|
||||
Labels: pr.Labels,
|
||||
GroupSet: groupSet,
|
||||
IdRules: idRules,
|
||||
NameRules: nameRules,
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package rules
|
||||
|
||||
import (
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
)
|
||||
|
||||
type ResourceRuleCollection struct {
|
||||
GroupSet map[string]struct{}
|
||||
IdRules map[string]*ResourceRuleItem
|
||||
NameRules map[string][]*ResourceRuleItem
|
||||
}
|
||||
|
||||
type ResourceRuleItem struct {
|
||||
ResourceName string
|
||||
RuleWithGroup
|
||||
}
|
||||
|
||||
type ResourceRule struct {
|
||||
Level v2alpha1.RuleLevel
|
||||
Custom bool
|
||||
ResourceRuleItem
|
||||
}
|
||||
|
||||
type ResourceRuleChunk struct {
|
||||
Level v2alpha1.RuleLevel
|
||||
Custom bool
|
||||
ResourceRulesMap map[string]*ResourceRuleCollection
|
||||
}
|
||||
|
||||
type RuleWithGroup struct {
|
||||
Group string
|
||||
Id string
|
||||
promresourcesv1.Rule
|
||||
}
|
||||
@@ -1,879 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package rules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/moby/locker"
|
||||
"github.com/pkg/errors"
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
prominformersv1 "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions/monitoring/v1"
|
||||
promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
customAlertingRuleResourcePrefix = "custom-alerting-rule-"
|
||||
|
||||
CustomRuleResourceLabelKeyLevel = "custom-alerting-rule-level"
|
||||
|
||||
customRuleGroupDefaultPrefix = "alerting.custom.defaults."
|
||||
customRuleGroupSize = 20
|
||||
)
|
||||
|
||||
var (
|
||||
maxSecretSize = corev1.MaxSecretSize
|
||||
maxConfigMapDataSize = int(float64(maxSecretSize) * 0.45)
|
||||
|
||||
errOutOfConfigMapSize = errors.New("out of config map size")
|
||||
|
||||
ruleResourceLocker locker.Locker
|
||||
)
|
||||
|
||||
type Ruler interface {
|
||||
Namespace() string
|
||||
RuleResourceNamespaceSelector() (labels.Selector, error)
|
||||
RuleResourceSelector(extraRuleResourceSelector labels.Selector) (labels.Selector, error)
|
||||
ExternalLabels() func() map[string]string
|
||||
|
||||
ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector) (
|
||||
[]*promresourcesv1.PrometheusRule, error)
|
||||
AddAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector,
|
||||
ruleResourceLabels map[string]string, rules ...*RuleWithGroup) ([]*v2alpha1.BulkItemResponse, error)
|
||||
UpdateAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector,
|
||||
ruleResourceLabels map[string]string, ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error)
|
||||
DeleteAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error)
|
||||
}
|
||||
|
||||
type ruleResource promresourcesv1.PrometheusRule
|
||||
|
||||
// deleteAlertingRules deletes the rules.
|
||||
// If there are rules to be deleted, return true to indicate the resource should be updated.
|
||||
func (r *ruleResource) deleteAlertingRules(rules ...*RuleWithGroup) (bool, error) {
|
||||
var (
|
||||
gs []promresourcesv1.RuleGroup
|
||||
dels = make(map[string]struct{})
|
||||
commit bool
|
||||
)
|
||||
|
||||
for _, rule := range rules {
|
||||
if rule != nil {
|
||||
dels[rule.Alert] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, g := range r.Spec.Groups {
|
||||
var rules []promresourcesv1.Rule
|
||||
for _, gr := range g.Rules {
|
||||
if gr.Alert != "" {
|
||||
if _, ok := dels[gr.Alert]; ok {
|
||||
commit = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
rules = append(rules, gr)
|
||||
}
|
||||
if len(rules) > 0 {
|
||||
gs = append(gs, promresourcesv1.RuleGroup{
|
||||
Name: g.Name,
|
||||
Interval: g.Interval,
|
||||
PartialResponseStrategy: g.PartialResponseStrategy,
|
||||
Rules: rules,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if commit {
|
||||
r.Spec.Groups = gs
|
||||
}
|
||||
return commit, nil
|
||||
}
|
||||
|
||||
// updateAlertingRules updates the rules.
|
||||
// If there are rules to be updated, return true to indicate the resource should be updated.
|
||||
func (r *ruleResource) updateAlertingRules(rules ...*RuleWithGroup) (bool, error) {
|
||||
var (
|
||||
commit bool
|
||||
spec = r.Spec.DeepCopy()
|
||||
ruleMap = make(map[string]*RuleWithGroup)
|
||||
ruleGroups = make(map[string]map[string]struct{}) // mapping of name to group
|
||||
)
|
||||
|
||||
if spec == nil {
|
||||
return false, nil
|
||||
}
|
||||
spec.Groups = nil
|
||||
|
||||
for i, rule := range rules {
|
||||
if rule != nil {
|
||||
ruleMap[rule.Alert] = rules[i]
|
||||
ruleGroups[rule.Alert] = make(map[string]struct{})
|
||||
}
|
||||
}
|
||||
|
||||
// Firstly delete the old rules
|
||||
for _, g := range r.Spec.Groups {
|
||||
var rules []promresourcesv1.Rule
|
||||
for _, r := range g.Rules {
|
||||
if r.Alert != "" {
|
||||
if _, ok := ruleMap[r.Alert]; ok {
|
||||
ruleGroups[r.Alert][g.Name] = struct{}{}
|
||||
commit = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
rules = append(rules, r)
|
||||
}
|
||||
if len(rules) > 0 {
|
||||
spec.Groups = append(spec.Groups, promresourcesv1.RuleGroup{
|
||||
Name: g.Name,
|
||||
Interval: g.Interval,
|
||||
PartialResponseStrategy: g.PartialResponseStrategy,
|
||||
Rules: rules,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var referNs = r.Namespace
|
||||
if len(r.Labels) > 0 {
|
||||
if l, ok := r.Labels[CustomRuleResourceLabelKeyLevel]; ok &&
|
||||
v2alpha1.RuleLevel(l) == v2alpha1.RuleLevelCluster {
|
||||
referNs = ""
|
||||
}
|
||||
}
|
||||
|
||||
addRules := func(g *promresourcesv1.RuleGroup) bool {
|
||||
var add bool
|
||||
var num = customRuleGroupSize - len(g.Rules)
|
||||
if num > 0 {
|
||||
for name, rule := range ruleMap {
|
||||
if num <= 0 {
|
||||
break
|
||||
}
|
||||
if gNames, ok := ruleGroups[name]; ok {
|
||||
// Add a rule to a different group than the group where it resided, to clear its alerts, etc.
|
||||
// Because Prometheus may migrate information such as alerts from the old rule into the new rule
|
||||
// when updating a rule within a group.
|
||||
if _, ok := gNames[g.Name]; !ok {
|
||||
appendRules(g, referNs, rule.Rule)
|
||||
num--
|
||||
delete(ruleMap, name)
|
||||
add = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return add
|
||||
}
|
||||
|
||||
// Then add the new rules
|
||||
var groupMax = -1
|
||||
for i, g := range spec.Groups {
|
||||
if len(ruleMap) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if strings.HasPrefix(g.Name, customRuleGroupDefaultPrefix) {
|
||||
suf, err := strconv.Atoi(strings.TrimPrefix(g.Name, customRuleGroupDefaultPrefix))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if suf > groupMax {
|
||||
groupMax = suf
|
||||
}
|
||||
}
|
||||
|
||||
if addRules(&spec.Groups[i]) {
|
||||
commit = true
|
||||
}
|
||||
}
|
||||
|
||||
for groupMax++; len(ruleMap) > 0; groupMax++ {
|
||||
g := promresourcesv1.RuleGroup{Name: fmt.Sprintf("%s%d", customRuleGroupDefaultPrefix, groupMax)}
|
||||
|
||||
if addRules(&g) {
|
||||
spec.Groups = append(spec.Groups, g)
|
||||
commit = true
|
||||
}
|
||||
}
|
||||
|
||||
if commit {
|
||||
content, err := yaml.Marshal(spec)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to unmarshal content")
|
||||
}
|
||||
if len(string(content)) > maxConfigMapDataSize { // check size limit
|
||||
return false, errOutOfConfigMapSize
|
||||
}
|
||||
r.Spec = *spec
|
||||
}
|
||||
return commit, nil
|
||||
}
|
||||
|
||||
// addAlertingRules adds the rules.
|
||||
// If there are rules to be added, return true to indicate the resource should be updated.
|
||||
func (r *ruleResource) addAlertingRules(rules ...*RuleWithGroup) (bool, error) {
|
||||
var (
|
||||
commit bool
|
||||
spec = r.Spec.DeepCopy()
|
||||
groupMax = -1
|
||||
|
||||
cursor int // indicates which rule to start adding for the rules with no groups
|
||||
|
||||
unGroupedRules []promresourcesv1.Rule // rules that do not specify group names
|
||||
groupedRules = make(map[string][]promresourcesv1.Rule) // rules that have specific group names
|
||||
)
|
||||
|
||||
for i, rule := range rules {
|
||||
if len(strings.TrimSpace(rule.Group)) == 0 {
|
||||
unGroupedRules = append(unGroupedRules, rules[i].Rule)
|
||||
} else {
|
||||
groupedRules[rule.Group] = append(groupedRules[rule.Group], rules[i].Rule)
|
||||
}
|
||||
}
|
||||
|
||||
if spec == nil {
|
||||
spec = new(promresourcesv1.PrometheusRuleSpec)
|
||||
}
|
||||
|
||||
var referNs = r.Namespace
|
||||
if len(r.Labels) > 0 {
|
||||
if l, ok := r.Labels[CustomRuleResourceLabelKeyLevel]; ok &&
|
||||
v2alpha1.RuleLevel(l) == v2alpha1.RuleLevelCluster {
|
||||
referNs = ""
|
||||
}
|
||||
}
|
||||
|
||||
// For the rules that have specific group names, add them to the matched groups.
|
||||
// For the rules that do not specify group names, add them to the automatically generated groups until the limit is reached.
|
||||
for i, g := range spec.Groups {
|
||||
var (
|
||||
gName = g.Name
|
||||
unGroupedRulesDrained = cursor >= len(unGroupedRules) // whether all rules without groups have been added
|
||||
groupedRulesDrained = len(groupedRules) == 0 // whether all rules with groups have been added
|
||||
)
|
||||
|
||||
if unGroupedRulesDrained && groupedRulesDrained {
|
||||
break
|
||||
}
|
||||
|
||||
if !groupedRulesDrained {
|
||||
if _, ok := groupedRules[gName]; ok {
|
||||
appendRules(&spec.Groups[i], referNs, groupedRules[gName]...)
|
||||
delete(groupedRules, gName)
|
||||
commit = true
|
||||
}
|
||||
}
|
||||
|
||||
g = spec.Groups[i]
|
||||
if !unGroupedRulesDrained && strings.HasPrefix(gName, customRuleGroupDefaultPrefix) {
|
||||
suf, err := strconv.Atoi(strings.TrimPrefix(gName, customRuleGroupDefaultPrefix))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if suf > groupMax {
|
||||
groupMax = suf
|
||||
}
|
||||
|
||||
if size := len(g.Rules); size < customRuleGroupSize {
|
||||
num := customRuleGroupSize - size
|
||||
var stop int
|
||||
if stop = cursor + num; stop > len(unGroupedRules) {
|
||||
stop = len(unGroupedRules)
|
||||
}
|
||||
appendRules(&spec.Groups[i], referNs, unGroupedRules[cursor:stop]...)
|
||||
cursor = stop
|
||||
commit = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no groups are available, new groups will be created to place the remaining rules.
|
||||
for gName := range groupedRules {
|
||||
rules := groupedRules[gName]
|
||||
if len(rules) == 0 {
|
||||
continue
|
||||
}
|
||||
g := promresourcesv1.RuleGroup{Name: gName}
|
||||
appendRules(&g, referNs, rules...)
|
||||
spec.Groups = append(spec.Groups, g)
|
||||
commit = true
|
||||
}
|
||||
for groupMax++; cursor < len(rules); groupMax++ {
|
||||
g := promresourcesv1.RuleGroup{Name: fmt.Sprintf("%s%d", customRuleGroupDefaultPrefix, groupMax)}
|
||||
var stop int
|
||||
if stop = cursor + customRuleGroupSize; stop > len(unGroupedRules) {
|
||||
stop = len(unGroupedRules)
|
||||
}
|
||||
appendRules(&g, referNs, unGroupedRules[cursor:stop]...)
|
||||
spec.Groups = append(spec.Groups, g)
|
||||
cursor = stop
|
||||
commit = true
|
||||
}
|
||||
|
||||
if commit {
|
||||
content, err := yaml.Marshal(spec)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "failed to unmarshal content")
|
||||
}
|
||||
if len(string(content)) > maxConfigMapDataSize { // check size limit
|
||||
return false, errOutOfConfigMapSize
|
||||
}
|
||||
r.Spec = *spec
|
||||
}
|
||||
return commit, nil
|
||||
}
|
||||
|
||||
func (r *ruleResource) commit(ctx context.Context, prometheusResourceClient promresourcesclient.Interface) error {
|
||||
var pr = (promresourcesv1.PrometheusRule)(*r)
|
||||
if len(pr.Spec.Groups) == 0 {
|
||||
return prometheusResourceClient.MonitoringV1().PrometheusRules(r.Namespace).Delete(ctx, r.Name, metav1.DeleteOptions{})
|
||||
}
|
||||
npr, err := prometheusResourceClient.MonitoringV1().PrometheusRules(r.Namespace).Update(ctx, &pr, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
npr.DeepCopyInto(&pr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func appendRules(group *promresourcesv1.RuleGroup, referNs string, rules ...promresourcesv1.Rule) {
|
||||
if len(rules) == 0 {
|
||||
return
|
||||
}
|
||||
for i := range rules {
|
||||
r := &rules[i]
|
||||
id := GenResourceRuleIdIgnoreFormat(group.Name, r)
|
||||
if r.Labels == nil {
|
||||
r.Labels = make(map[string]string)
|
||||
}
|
||||
var v = id
|
||||
if referNs != "" {
|
||||
v = referNs + "-" + id
|
||||
}
|
||||
r.Labels[LabelKeyRuleId] = v
|
||||
}
|
||||
group.Rules = append(group.Rules, rules...)
|
||||
}
|
||||
|
||||
type PrometheusRuler struct {
|
||||
resource *promresourcesv1.Prometheus
|
||||
informer prominformersv1.PrometheusRuleInformer
|
||||
client promresourcesclient.Interface
|
||||
}
|
||||
|
||||
func NewPrometheusRuler(resource *promresourcesv1.Prometheus, informer prominformersv1.PrometheusRuleInformer,
|
||||
client promresourcesclient.Interface) Ruler {
|
||||
return &PrometheusRuler{
|
||||
resource: resource,
|
||||
informer: informer,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) Namespace() string {
|
||||
return r.resource.Namespace
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) RuleResourceNamespaceSelector() (labels.Selector, error) {
|
||||
if r.resource.Spec.RuleNamespaceSelector == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return metav1.LabelSelectorAsSelector(r.resource.Spec.RuleNamespaceSelector)
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) RuleResourceSelector(extraRuleResourceSelector labels.Selector) (labels.Selector, error) {
|
||||
rSelector, err := metav1.LabelSelectorAsSelector(r.resource.Spec.RuleSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if extraRuleResourceSelector != nil {
|
||||
if requirements, ok := extraRuleResourceSelector.Requirements(); ok {
|
||||
rSelector = rSelector.Add(requirements...)
|
||||
}
|
||||
}
|
||||
return rSelector, nil
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) ExternalLabels() func() map[string]string {
|
||||
// ignoring the external labels because rules gotten from prometheus endpoint do not include them
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleResourceSelector labels.Selector) (
|
||||
[]*promresourcesv1.PrometheusRule, error) {
|
||||
selected, err := ruleNamespaceSelected(r, ruleNamespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !selected {
|
||||
return nil, nil
|
||||
}
|
||||
rSelector, err := r.RuleResourceSelector(extraRuleResourceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.informer.Lister().PrometheusRules(ruleNamespace.Name).List(rSelector)
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) AddAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector, ruleResourceLabels map[string]string,
|
||||
rules ...*RuleWithGroup) ([]*v2alpha1.BulkItemResponse, error) {
|
||||
return nil, errors.New("Adding Prometheus rules not supported")
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) UpdateAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector, ruleResourceLabels map[string]string,
|
||||
ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error) {
|
||||
return nil, errors.New("Updating Prometheus rules not supported")
|
||||
}
|
||||
|
||||
func (r *PrometheusRuler) DeleteAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error) {
|
||||
return nil, errors.New("Deleting Prometheus rules not supported.")
|
||||
}
|
||||
|
||||
// ThanosRuler is a Ruler implementation backed by a ThanosRuler custom
// resource. Unlike PrometheusRuler it supports adding, updating and deleting
// custom alerting rules by editing PrometheusRule resources.
type ThanosRuler struct {
	resource *promresourcesv1.ThanosRuler           // the ThanosRuler custom resource this ruler wraps
	informer prominformersv1.PrometheusRuleInformer // shared informer used to list PrometheusRule resources
	client   promresourcesclient.Interface          // client used to read and write PrometheusRule resources
}
||||
// NewThanosRuler returns a Ruler that manages the alerting rules evaluated by
// the given ThanosRuler resource.
func NewThanosRuler(resource *promresourcesv1.ThanosRuler, informer prominformersv1.PrometheusRuleInformer,
	client promresourcesclient.Interface) Ruler {
	return &ThanosRuler{
		resource: resource,
		informer: informer,
		client:   client,
	}
}
||||
// Namespace returns the namespace in which the ThanosRuler resource lives.
func (r *ThanosRuler) Namespace() string {
	return r.resource.Namespace
}
||||
func (r *ThanosRuler) RuleResourceNamespaceSelector() (labels.Selector, error) {
|
||||
if r.resource.Spec.RuleNamespaceSelector == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return metav1.LabelSelectorAsSelector(r.resource.Spec.RuleNamespaceSelector)
|
||||
}
|
||||
|
||||
func (r *ThanosRuler) RuleResourceSelector(extraRuleSelector labels.Selector) (labels.Selector, error) {
|
||||
rSelector, err := metav1.LabelSelectorAsSelector(r.resource.Spec.RuleSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if extraRuleSelector != nil {
|
||||
if requirements, ok := extraRuleSelector.Requirements(); ok {
|
||||
rSelector = rSelector.Add(requirements...)
|
||||
}
|
||||
}
|
||||
return rSelector, nil
|
||||
}
|
||||
|
||||
func (r *ThanosRuler) ExternalLabels() func() map[string]string {
|
||||
// rules gotten from thanos ruler endpoint include the labels
|
||||
lbls := make(map[string]string)
|
||||
if ls := r.resource.Spec.Labels; ls != nil {
|
||||
for k, v := range ls {
|
||||
lbls[k] = v
|
||||
}
|
||||
}
|
||||
return func() map[string]string {
|
||||
return lbls
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ThanosRuler) ListRuleResources(ruleNamespace *corev1.Namespace, extraRuleSelector labels.Selector) (
|
||||
[]*promresourcesv1.PrometheusRule, error) {
|
||||
selected, err := ruleNamespaceSelected(r, ruleNamespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !selected {
|
||||
return nil, nil
|
||||
}
|
||||
rSelector, err := r.RuleResourceSelector(extraRuleSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.informer.Lister().PrometheusRules(ruleNamespace.Name).List(rSelector)
|
||||
}
|
||||
|
||||
// AddAlertingRules adds the given custom rules to the rule resources selected
// by this ruler in the given namespace, returning one response item per rule.
// It delegates to addAlertingRules with no excluded resources.
func (r *ThanosRuler) AddAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
	extraRuleResourceSelector labels.Selector, ruleResourceLabels map[string]string,
	rules ...*RuleWithGroup) ([]*v2alpha1.BulkItemResponse, error) {

	return r.addAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector, nil, ruleResourceLabels, rules...)
}
||||
func (r *ThanosRuler) addAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
extraRuleResourceSelector labels.Selector, excludePrometheusRules map[string]struct{},
|
||||
ruleResourceLabels map[string]string, rules ...*RuleWithGroup) ([]*v2alpha1.BulkItemResponse, error) {
|
||||
|
||||
prometheusRules, err := r.ListRuleResources(ruleNamespace, extraRuleResourceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// sort by the left space to speed up the hit rate
|
||||
sort.Slice(prometheusRules, func(i, j int) bool {
|
||||
return len(fmt.Sprint(prometheusRules[i])) <= len(fmt.Sprint(prometheusRules[j]))
|
||||
})
|
||||
|
||||
var (
|
||||
respItems = make([]*v2alpha1.BulkItemResponse, 0, len(rules))
|
||||
cursor int
|
||||
)
|
||||
|
||||
resp := func(rule *RuleWithGroup, err error) *v2alpha1.BulkItemResponse {
|
||||
if err != nil {
|
||||
return v2alpha1.NewBulkItemErrorServerResponse(rule.Alert, err)
|
||||
}
|
||||
return v2alpha1.NewBulkItemSuccessResponse(rule.Alert, v2alpha1.ResultCreated)
|
||||
}
|
||||
|
||||
for _, pr := range prometheusRules {
|
||||
if cursor >= len(rules) {
|
||||
break
|
||||
}
|
||||
if len(excludePrometheusRules) > 0 {
|
||||
if _, ok := excludePrometheusRules[pr.Name]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
num = len(rules) - cursor
|
||||
stop = len(rules)
|
||||
rs []*RuleWithGroup
|
||||
)
|
||||
|
||||
// First add all the rules to this resource,
|
||||
// and if the limit is exceeded, add half
|
||||
for i := 1; i <= 2; i++ {
|
||||
stop = cursor + num/i
|
||||
rs = rules[cursor:stop]
|
||||
|
||||
err = r.doRuleResourceOperation(ctx, pr.Namespace, pr.Name, func(pr *promresourcesv1.PrometheusRule) error {
|
||||
resource := ruleResource(*pr)
|
||||
if ok, err := resource.addAlertingRules(rs...); err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
if err = resource.commit(ctx, r.client); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err == errOutOfConfigMapSize && num > 1 {
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
switch {
|
||||
case err == errOutOfConfigMapSize:
|
||||
break
|
||||
case resourceNotFound(err):
|
||||
continue
|
||||
default:
|
||||
for _, rule := range rs {
|
||||
respItems = append(respItems, resp(rule, err))
|
||||
}
|
||||
cursor = stop
|
||||
}
|
||||
}
|
||||
|
||||
// create new rule resources and add rest rules into them
|
||||
// when all existing rule resources are full.
|
||||
for cursor < len(rules) {
|
||||
var (
|
||||
err error
|
||||
num = len(rules) - cursor
|
||||
stop = len(rules)
|
||||
rs []*RuleWithGroup
|
||||
)
|
||||
// If adding the rules to the new resource exceeds the limit,
|
||||
// reduce the amount to 1/2, 1/3... of rest rules until the new resource can accommodate.
|
||||
for i := 1; ; i++ {
|
||||
stop = cursor + num/i
|
||||
rs = rules[cursor:stop]
|
||||
|
||||
pr := &promresourcesv1.PrometheusRule{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ruleNamespace.Name,
|
||||
GenerateName: customAlertingRuleResourcePrefix,
|
||||
Labels: ruleResourceLabels,
|
||||
},
|
||||
}
|
||||
resource := ruleResource(*pr)
|
||||
var ok bool
|
||||
ok, err = resource.addAlertingRules(rs...)
|
||||
if err == errOutOfConfigMapSize {
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
pr.Spec = resource.Spec
|
||||
_, err = r.client.MonitoringV1().PrometheusRules(ruleNamespace.Name).Create(ctx, pr, metav1.CreateOptions{})
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
for _, rule := range rs {
|
||||
respItems = append(respItems, resp(rule, err))
|
||||
}
|
||||
cursor = stop
|
||||
}
|
||||
|
||||
return respItems, nil
|
||||
}
|
||||
|
||||
// UpdateAlertingRules updates the given custom rules inside the rule
// resources in which they currently reside, returning one response per rule.
// Duplicates (the same alert name present in several resources) are updated
// once and the extra copies are deleted. Rules whose in-place update would
// exceed the resource size limit are "moved": added to other resources first,
// then deleted from the original resource.
func (r *ThanosRuler) UpdateAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
	extraRuleResourceSelector labels.Selector, ruleResourceLabels map[string]string,
	ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error) {

	var (
		itemsMap  = make(map[string][]*ResourceRuleItem)
		respItems = make([]*v2alpha1.BulkItemResponse, 0, len(ruleItems))
		// rules updated successfully. The key is the rule name.
		rulesUpdated = make(map[string]struct{})
		// rules to be moved to other resources. The key is the resource name in which the rules reside.
		rulesToMove = make(map[string][]*ResourceRuleItem)
		// duplicate rules to be deleted
		rulesToDelete = make(map[string][]*ResourceRuleItem)
	)

	// group the rules by the resource containing them
	for i, item := range ruleItems {
		itemsMap[item.ResourceName] = append(itemsMap[item.ResourceName], ruleItems[i])
	}

	// Update the rules in the resources where the rules reside.
	// If duplicate rules are found, the first will be updated and the others will be deleted.
	// if updating the rules in the original resources causes exceeding size limit,
	// they will be moved to other resources and then be updated.
	for name, items := range itemsMap {
		var (
			nrules []*RuleWithGroup
			nitems []*ResourceRuleItem
		)

		for i := range items {
			item := items[i]
			if _, ok := rulesUpdated[item.Alert]; ok {
				// already updated via another resource: this copy is a duplicate
				rulesToDelete[name] = append(rulesToDelete[name], item)
				continue
			}
			nrules = append(nrules, &item.RuleWithGroup)
			nitems = append(nitems, item)
		}
		if len(nrules) == 0 {
			continue
		}

		err := r.doRuleResourceOperation(ctx, ruleNamespace.Name, name, func(pr *promresourcesv1.PrometheusRule) error {
			resource := ruleResource(*pr)
			if ok, err := resource.updateAlertingRules(nrules...); err != nil {
				return err
			} else if ok {
				if err = resource.commit(ctx, r.client); err != nil {
					return err
				}
			}
			return nil
		})

		switch {
		case err == nil:
			for _, item := range items {
				rulesUpdated[item.Alert] = struct{}{}
				respItems = append(respItems, v2alpha1.NewBulkItemSuccessResponse(item.Alert, v2alpha1.ResultUpdated))
			}
		case err == errOutOfConfigMapSize: // Cannot update the rules in the original resource
			rulesToMove[name] = append(rulesToMove[name], nitems...)
		case resourceNotFound(err):
			for _, item := range items {
				respItems = append(respItems, &v2alpha1.BulkItemResponse{
					RuleName:  item.Alert,
					Status:    v2alpha1.StatusError,
					ErrorType: v2alpha1.ErrNotFound,
				})
			}
		default:
			for _, item := range items {
				respItems = append(respItems, v2alpha1.NewBulkItemErrorServerResponse(item.Alert, err))
			}
		}
	}

	// The move here is not really move, because the move also requires an update.
	// What really happens is that the new rules will be added in other resources first,
	// and then the old rules will be deleted from the original resources.
	for name, items := range rulesToMove {
		var (
			nrules = make([]*RuleWithGroup, 0, len(items))
			nitems = make(map[string]*ResourceRuleItem, len(items))
		)
		for i := range items {
			item := items[i]
			nrules = append(nrules, &item.RuleWithGroup)
			nitems[item.Alert] = item
		}
		if len(nrules) == 0 {
			continue
		}

		// exclude the original resource so the rules land elsewhere
		aRespItems, err := r.addAlertingRules(ctx, ruleNamespace, extraRuleResourceSelector,
			map[string]struct{}{name: {}}, ruleResourceLabels, nrules...)
		if err != nil {
			for _, item := range items {
				respItems = append(respItems, v2alpha1.NewBulkItemErrorServerResponse(item.Alert, err))
			}
			continue
		}

		for i := range aRespItems {
			resp := aRespItems[i]
			switch resp.Status {
			case v2alpha1.StatusSuccess:
				// added elsewhere: schedule deletion of the old copy
				if item, ok := nitems[resp.RuleName]; ok {
					rulesToDelete[name] = append(rulesToDelete[name], item)
				}
			default:
				respItems = append(respItems, resp)
			}
		}
	}

	// delete duplicate copies and the originals of moved rules
	for _, items := range rulesToDelete {
		dRespItems, err := r.DeleteAlertingRules(ctx, ruleNamespace, items...)
		if err != nil {
			for _, item := range items {
				respItems = append(respItems, v2alpha1.NewBulkItemErrorServerResponse(item.Alert, err))
			}
			continue
		}
		for i := range dRespItems {
			resp := dRespItems[i]
			if resp.Status == v2alpha1.StatusSuccess {
				// The delete operation here is for updating, so update the result to v2alpha1.ResultUpdated
				resp.Result = v2alpha1.ResultUpdated
			}
			respItems = append(respItems, resp)
		}
	}

	return respItems, nil
}
||||
func (r *ThanosRuler) DeleteAlertingRules(ctx context.Context, ruleNamespace *corev1.Namespace,
|
||||
ruleItems ...*ResourceRuleItem) ([]*v2alpha1.BulkItemResponse, error) {
|
||||
|
||||
var (
|
||||
itemsMap = make(map[string][]*ResourceRuleItem)
|
||||
respItems = make([]*v2alpha1.BulkItemResponse, 0, len(ruleItems))
|
||||
)
|
||||
|
||||
for i, ruleItem := range ruleItems {
|
||||
itemsMap[ruleItem.ResourceName] = append(itemsMap[ruleItem.ResourceName], ruleItems[i])
|
||||
}
|
||||
|
||||
resp := func(item *ResourceRuleItem, err error) *v2alpha1.BulkItemResponse {
|
||||
if err != nil {
|
||||
return v2alpha1.NewBulkItemErrorServerResponse(item.Alert, err)
|
||||
}
|
||||
return v2alpha1.NewBulkItemSuccessResponse(item.Alert, v2alpha1.ResultDeleted)
|
||||
}
|
||||
|
||||
for name, items := range itemsMap {
|
||||
var rules []*RuleWithGroup
|
||||
for i := range items {
|
||||
rules = append(rules, &items[i].RuleWithGroup)
|
||||
}
|
||||
|
||||
err := r.doRuleResourceOperation(ctx, ruleNamespace.Name, name, func(pr *promresourcesv1.PrometheusRule) error {
|
||||
resource := ruleResource(*pr)
|
||||
if ok, err := resource.deleteAlertingRules(rules...); err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
if err = resource.commit(ctx, r.client); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
for _, item := range items {
|
||||
respItems = append(respItems, resp(item, err))
|
||||
}
|
||||
}
|
||||
|
||||
return respItems, nil
|
||||
}
|
||||
|
||||
// doRuleResourceOperation fetches the named PrometheusRule resource and
// applies the given operation to it, retrying the whole get+operate sequence
// on update conflicts so the operation always sees a fresh copy.
func (r *ThanosRuler) doRuleResourceOperation(ctx context.Context, namespace, name string,
	operation func(pr *promresourcesv1.PrometheusRule) error) error {
	// Lock here is used to lock specific resource in order to prevent frequent conflicts
	key := namespace + "/" + name
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		ruleResourceLocker.Lock(key)
		defer ruleResourceLocker.Unlock(key)
		pr, err := r.client.MonitoringV1().PrometheusRules(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		return operation(pr)
	})
}
||||
func ruleNamespaceSelected(r Ruler, ruleNamespace *corev1.Namespace) (bool, error) {
|
||||
rnSelector, err := r.RuleResourceNamespaceSelector()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if rnSelector == nil { // refer to the comment of Prometheus.Spec.RuleResourceNamespaceSelector
|
||||
if r.Namespace() != ruleNamespace.Name {
|
||||
return false, nil
|
||||
}
|
||||
} else {
|
||||
if !rnSelector.Matches(labels.Set(ruleNamespace.Labels)) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func resourceNotFound(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *apierrors.StatusError:
|
||||
if e.Status().Code == http.StatusNotFound {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,414 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package rules
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus-community/prom-label-proxy/injectproxy"
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
prommodel "github.com/prometheus/common/model"
|
||||
promlabels "github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/rules"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
|
||||
)
|
||||
|
||||
const (
	// ErrGenRuleId is the message used to wrap rule-id generation failures.
	ErrGenRuleId = "error generating rule id"

	// Internal label keys mixed into the label set from which a rule's
	// fingerprint id is computed.
	LabelKeyInternalRuleGroup    = "__rule_group__"
	LabelKeyInternalRuleName     = "__rule_name__"
	LabelKeyInternalRuleQuery    = "__rule_query__"
	LabelKeyInternalRuleDuration = "__rule_duration__"

	// Per-replica labels added by thanos-ruler / prometheus; they are
	// excluded when generating rule ids because they differ per replica.
	LabelKeyThanosRulerReplica = "thanos_ruler_replica"
	LabelKeyPrometheusReplica  = "prometheus_replica"

	// LabelKeyRuleId carries a previously generated rule id; it is excluded
	// when re-generating the id so the id does not depend on itself.
	LabelKeyRuleId = "rule_id"

	// Alert-type label attached to metric alerting rules.
	LabelKeyAlertType   = "alerttype"
	LabelValueAlertType = "metric"
)
|
||||
func FormatExpr(expr string) (string, error) {
|
||||
parsedExpr, err := parser.ParseExpr(expr)
|
||||
if err == nil {
|
||||
return parsedExpr.String(), nil
|
||||
}
|
||||
return "", errors.Wrapf(err, "failed to parse expr: %s", expr)
|
||||
}
|
||||
|
||||
// InjectExprNamespaceLabel injects an label, whose key is "namespace" and whose value is the given namespace,
|
||||
// into the prometheus query expression, which will limit the query scope.
|
||||
func InjectExprNamespaceLabel(expr, namespace string) (string, error) {
|
||||
parsedExpr, err := parser.ParseExpr(expr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err = injectproxy.NewEnforcer(false, &promlabels.Matcher{
|
||||
Type: promlabels.MatchEqual,
|
||||
Name: "namespace",
|
||||
Value: namespace,
|
||||
}).EnforceNode(parsedExpr); err == nil {
|
||||
return parsedExpr.String(), nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
func FormatDuration(for_ string) (string, error) {
|
||||
var duration prommodel.Duration
|
||||
var err error
|
||||
if for_ != "" {
|
||||
duration, err = prommodel.ParseDuration(for_)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to parse Duration string(\"%s\") to time.Duration", for_)
|
||||
}
|
||||
}
|
||||
return duration.String(), nil
|
||||
}
|
||||
|
||||
// parseDurationSeconds converts a duration expressed in (possibly fractional)
// seconds into prometheus' normalized duration string form.
func parseDurationSeconds(durationSeconds float64) string {
	return prommodel.Duration(int64(durationSeconds * float64(time.Second))).String()
}
|
||||
func GenResourceRuleIdIgnoreFormat(group string, rule *promresourcesv1.Rule) string {
|
||||
query, err := FormatExpr(rule.Expr.String())
|
||||
if err != nil {
|
||||
klog.Warning(errors.Wrapf(err, "invalid alerting rule(%s)", rule.Alert))
|
||||
query = rule.Expr.String()
|
||||
}
|
||||
duration, err := FormatDuration(string(rule.For))
|
||||
if err != nil {
|
||||
klog.Warning(errors.Wrapf(err, "invalid alerting rule(%s)", rule.Alert))
|
||||
duration = string(rule.For)
|
||||
}
|
||||
|
||||
lbls := make(map[string]string)
|
||||
for k, v := range rule.Labels {
|
||||
if k == LabelKeyRuleId {
|
||||
continue
|
||||
}
|
||||
lbls[k] = v
|
||||
}
|
||||
lbls[LabelKeyInternalRuleGroup] = group
|
||||
lbls[LabelKeyInternalRuleName] = rule.Alert
|
||||
lbls[LabelKeyInternalRuleQuery] = query
|
||||
lbls[LabelKeyInternalRuleDuration] = duration
|
||||
|
||||
return prommodel.Fingerprint(prommodel.LabelsToSignature(lbls)).String()
|
||||
}
|
||||
|
||||
// GenEndpointRuleId computes the fingerprint id for a rule fetched from a
// prometheus/thanos-ruler endpoint, in the same label space as
// GenResourceRuleIdIgnoreFormat so ids from both sources can be matched.
// Per-replica labels, labels matching the ruler's external labels, and any
// previously generated id label are excluded before fingerprinting.
func GenEndpointRuleId(group string, epRule *alerting.AlertingRule,
	externalLabels func() map[string]string) (string, error) {
	query, err := FormatExpr(epRule.Query)
	if err != nil {
		return "", err
	}
	duration := parseDurationSeconds(epRule.Duration)

	var extLabels map[string]string
	if externalLabels != nil {
		extLabels = externalLabels()
	}
	// Drop replica labels, and labels the ruler injected as external labels
	// (a label is kept when its value differs from the external one).
	labelsMap := make(map[string]string)
	for key, value := range epRule.Labels {
		if key == LabelKeyPrometheusReplica || key == LabelKeyThanosRulerReplica {
			continue
		}
		if extLabels == nil {
			labelsMap[key] = value
			continue
		}
		if v, ok := extLabels[key]; !(ok && value == v) {
			labelsMap[key] = value
		}
	}

	// Exclude any previously generated id and add the internal labels,
	// mirroring GenResourceRuleIdIgnoreFormat.
	lbls := make(map[string]string)
	for k, v := range labelsMap {
		if k == LabelKeyRuleId {
			continue
		}
		lbls[k] = v
	}
	lbls[LabelKeyInternalRuleGroup] = group
	lbls[LabelKeyInternalRuleName] = epRule.Name
	lbls[LabelKeyInternalRuleQuery] = query
	lbls[LabelKeyInternalRuleDuration] = duration

	return prommodel.Fingerprint(prommodel.LabelsToSignature(lbls)).String(), nil
}
|
||||
// GetAlertingRulesStatus mix rules from prometheusrule custom resources and rules from endpoints.
// Use rules from prometheusrule custom resources as the main reference: each
// resource rule is matched to an endpoint rule by fingerprint id, and the
// endpoint rule contributes the runtime state (health, state, alerts).
func GetAlertingRulesStatus(ruleNamespace string, ruleChunk *ResourceRuleChunk, epRuleGroups []*alerting.RuleGroup,
	extLabels func() map[string]string) ([]*v2alpha1.GettableAlertingRule, error) {

	var (
		// endpoint rules indexed by their fingerprint id
		idEpRules = make(map[string]*alerting.AlertingRule)
		// endpoint rule ids grouped by rule name
		nameIds = make(map[string][]string)
		ret     []*v2alpha1.GettableAlertingRule
	)
	for _, group := range epRuleGroups {
		// rule files are named "<namespace>-<resource name>...", so the file
		// name is used to map an endpoint group back to a resource
		fileShort := strings.TrimSuffix(filepath.Base(group.File), filepath.Ext(group.File))
		if !strings.HasPrefix(fileShort, ruleNamespace+"-") {
			continue
		}
		var resourceRules *ResourceRuleCollection
		for resourceName, rules := range ruleChunk.ResourceRulesMap {
			if strings.Contains(strings.TrimPrefix(fileShort, ruleNamespace+"-"), resourceName) {
				resourceRules = rules
			}
		}
		if resourceRules == nil {
			continue
		}
		if _, ok := resourceRules.GroupSet[group.Name]; !ok {
			continue
		}

		for i, epRule := range group.Rules {
			if eid, err := GenEndpointRuleId(group.Name, epRule, extLabels); err != nil {
				return nil, errors.Wrap(err, ErrGenRuleId)
			} else {
				idEpRules[eid] = group.Rules[i]
				nameIds[epRule.Name] = append(nameIds[epRule.Name], eid)
			}
		}
	}
	if ruleChunk.Custom {
		// guarantee the names of the custom alerting rules not to be repeated
		var m = make(map[string][]*ResourceRuleItem)
		for _, resourceRules := range ruleChunk.ResourceRulesMap {
			for name, rrArr := range resourceRules.NameRules {
				m[name] = append(m[name], rrArr...)
			}
		}
		for _, rrArr := range m {
			if l := len(rrArr); l > 0 {
				if l > 1 {
					// pick a deterministic representative among duplicates
					sort.Slice(rrArr, func(i, j int) bool {
						return v2alpha1.AlertingRuleIdCompare(rrArr[i].Id, rrArr[j].Id)
					})
				}
				resRule := rrArr[0]
				epRule := idEpRules[resRule.Id]
				if r := getAlertingRuleStatus(resRule, epRule, ruleChunk.Custom, ruleChunk.Level); r != nil {
					ret = append(ret, r)
				}
			}
		}
	} else {
		// guarantee the ids of the builtin alerting rules not to be repeated
		var m = make(map[string]*v2alpha1.GettableAlertingRule)
		for _, resourceRules := range ruleChunk.ResourceRulesMap {
			for id, rule := range resourceRules.IdRules {
				if r := getAlertingRuleStatus(rule, idEpRules[id], ruleChunk.Custom, ruleChunk.Level); r != nil {
					m[id] = r
				}
			}
		}
		for _, r := range m {
			ret = append(ret, r)
		}
	}

	return ret, nil
}
|
||||
// GetAlertingRuleStatus returns the status of a single resource rule,
// enriched with runtime state from the matching endpoint rule (matched by
// resource name, group and fingerprint id). A nil rule or empty alert name
// yields (nil, nil).
func GetAlertingRuleStatus(ruleNamespace string, rule *ResourceRule, epRuleGroups []*alerting.RuleGroup,
	extLabels func() map[string]string) (*v2alpha1.GettableAlertingRule, error) {

	if rule == nil || rule.Alert == "" {
		return nil, nil
	}

	var epRule *alerting.AlertingRule
	// labeled loop: break out of both loops once the matching rule is found
out:
	for _, group := range epRuleGroups {
		// rule files are named "<namespace>-<resource name>...", so the file
		// name maps an endpoint group back to a resource
		fileShort := strings.TrimSuffix(filepath.Base(group.File), filepath.Ext(group.File))
		if !strings.HasPrefix(fileShort, ruleNamespace+"-") {
			continue
		}
		if !strings.Contains(strings.TrimPrefix(fileShort, ruleNamespace+"-"), rule.ResourceName) {
			continue
		}

		if group.Name != rule.Group {
			continue
		}

		for _, epr := range group.Rules {
			if epr.Name != rule.Alert { // first check name to speed up the hit
				continue
			}
			if eid, err := GenEndpointRuleId(group.Name, epr, extLabels); err != nil {
				return nil, errors.Wrap(err, ErrGenRuleId)
			} else {
				if rule.Id == eid {
					epRule = epr
					break out
				}
			}
		}
	}

	return getAlertingRuleStatus(&rule.ResourceRuleItem, epRule, rule.Custom, rule.Level), nil
}
|
||||
// getAlertingRuleStatus builds a GettableAlertingRule from a resource rule
// (the source of truth for the definition) and the matching endpoint rule
// (the source of runtime state: health, state, alerts). With no endpoint
// rule, the status defaults to inactive/unknown. Returns nil when resRule is
// nil or has an empty alert name.
func getAlertingRuleStatus(resRule *ResourceRuleItem, epRule *alerting.AlertingRule,
	custom bool, level v2alpha1.RuleLevel) *v2alpha1.GettableAlertingRule {

	if resRule == nil || resRule.Alert == "" {
		return nil
	}

	rule := v2alpha1.GettableAlertingRule{
		AlertingRule: v2alpha1.AlertingRule{
			Id:          resRule.Id,
			Name:        resRule.Rule.Alert,
			Query:       resRule.Rule.Expr.String(),
			Duration:    string(resRule.Rule.For),
			Labels:      resRule.Rule.Labels,
			Annotations: resRule.Rule.Annotations,
		},
		State:  stateInactiveString,
		Health: string(rules.HealthUnknown),
	}

	if epRule != nil {
		// The state information and alerts associated with the rule are from the rule from the endpoint.
		if epRule.Health != "" {
			rule.Health = epRule.Health
		}
		rule.LastError = epRule.LastError
		rule.LastEvaluation = epRule.LastEvaluation
		if epRule.EvaluationTime != nil {
			rule.EvaluationDurationSeconds = *epRule.EvaluationTime
		}

		rState := strings.ToLower(epRule.State)
		cliRuleStateEmpty := rState == ""
		if !cliRuleStateEmpty {
			rule.State = rState
		}
		for _, a := range epRule.Alerts {
			aState := strings.ToLower(a.State)
			if cliRuleStateEmpty {
				// for the rules gotten from prometheus or thanos ruler with a lower version, they may not contain
				// the state property, so compute the rule state by states of its alerts
				if alertState(rState) < alertState(aState) {
					rule.State = aState
				}
			}
			rule.Alerts = append(rule.Alerts, &v2alpha1.Alert{
				ActiveAt:    a.ActiveAt,
				Labels:      a.Labels,
				Annotations: a.Annotations,
				State:       aState,
				Value:       a.Value,

				RuleId:   rule.Id,
				RuleName: rule.Name,
			})
		}
	}
	return &rule
}
|
||||
func ParseAlertingRules(epRuleGroups []*alerting.RuleGroup, custom bool, level v2alpha1.RuleLevel,
|
||||
filterFunc func(group, ruleId string, rule *alerting.AlertingRule) bool) ([]*v2alpha1.GettableAlertingRule, error) {
|
||||
|
||||
var ret []*v2alpha1.GettableAlertingRule
|
||||
for _, g := range epRuleGroups {
|
||||
for _, r := range g.Rules {
|
||||
id, err := GenEndpointRuleId(g.Name, r, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if filterFunc(g.Name, id, r) {
|
||||
rule := &v2alpha1.GettableAlertingRule{
|
||||
AlertingRule: v2alpha1.AlertingRule{
|
||||
Id: id,
|
||||
Name: r.Name,
|
||||
Query: r.Query,
|
||||
Duration: parseDurationSeconds(r.Duration),
|
||||
Labels: r.Labels,
|
||||
Annotations: r.Annotations,
|
||||
},
|
||||
State: r.State,
|
||||
Health: string(r.Health),
|
||||
LastError: r.LastError,
|
||||
LastEvaluation: r.LastEvaluation,
|
||||
}
|
||||
if r.EvaluationTime != nil {
|
||||
rule.EvaluationDurationSeconds = *r.EvaluationTime
|
||||
}
|
||||
if rule.Health != "" {
|
||||
rule.Health = string(rules.HealthUnknown)
|
||||
}
|
||||
ruleStateEmpty := rule.State == ""
|
||||
rule.State = stateInactiveString
|
||||
for _, a := range r.Alerts {
|
||||
aState := strings.ToLower(a.State)
|
||||
if ruleStateEmpty {
|
||||
// for the rules gotten from prometheus or thanos ruler with a lower version, they may not contain
|
||||
// the state property, so compute the rule state by states of its alerts
|
||||
if alertState(rule.State) < alertState(aState) {
|
||||
rule.State = aState
|
||||
}
|
||||
}
|
||||
rule.Alerts = append(rule.Alerts, &v2alpha1.Alert{
|
||||
ActiveAt: a.ActiveAt,
|
||||
Labels: a.Labels,
|
||||
Annotations: a.Annotations,
|
||||
State: aState,
|
||||
Value: a.Value,
|
||||
|
||||
RuleId: rule.Id,
|
||||
RuleName: rule.Name,
|
||||
})
|
||||
}
|
||||
ret = append(ret, rule)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Cached string forms of the prometheus alert states, used when normalizing
// and comparing rule/alert states.
var (
	statePendingString  = rules.StatePending.String()
	stateFiringString   = rules.StateFiring.String()
	stateInactiveString = rules.StateInactive.String()
)
|
||||
func alertState(state string) rules.AlertState {
|
||||
switch state {
|
||||
case statePendingString:
|
||||
return rules.StatePending
|
||||
case stateFiringString:
|
||||
return rules.StateFiring
|
||||
case stateInactiveString:
|
||||
return rules.StateInactive
|
||||
}
|
||||
return rules.StateInactive
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package rules
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
promresourcesv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
"github.com/prometheus/prometheus/rules"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/alerting/v2alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/alerting"
|
||||
)
|
||||
|
||||
// TestGetAlertingRulesStatus verifies that GetAlertingRulesStatus merges a
// rule defined in a PrometheusRule resource with the matching rule from the
// ruler endpoint: the definition (query, duration, annotations) comes from
// the resource, while health and state come from the endpoint.
func TestGetAlertingRulesStatus(t *testing.T) {
	var tests = []struct {
		description       string
		ruleNamespace     string
		resourceRuleChunk *ResourceRuleChunk
		ruleGroups        []*alerting.RuleGroup
		extLabels         func() map[string]string
		expected          []*v2alpha1.GettableAlertingRule
	}{{
		description:   "get alerting rules status",
		ruleNamespace: "test",
		resourceRuleChunk: &ResourceRuleChunk{
			Level:  v2alpha1.RuleLevelNamespace,
			Custom: true,
			ResourceRulesMap: map[string]*ResourceRuleCollection{
				"custom-alerting-rule-jqbgn": {
					GroupSet: map[string]struct{}{"alerting.custom.defaults": {}},
					NameRules: map[string][]*ResourceRuleItem{
						"ca7f09e76954e67c": {{
							ResourceName: "custom-alerting-rule-jqbgn",
							RuleWithGroup: RuleWithGroup{
								Group: "alerting.custom.defaults",
								Id:    "ca7f09e76954e67c",
								Rule: promresourcesv1.Rule{
									Alert: "TestCPUUsageHigh",
									Expr:  intstr.FromString(`namespace:workload_cpu_usage:sum{namespace="test"} > 1`),
									For:   "1m",
									Annotations: map[string]string{
										"alias":       "The alias is here",
										"description": "The description is here",
									},
								},
							},
						}},
					},
				},
			},
		},
		// endpoint rule group whose file name maps back to namespace "test"
		// and resource "custom-alerting-rule-jqbgn"
		ruleGroups: []*alerting.RuleGroup{{
			Name: "alerting.custom.defaults",
			File: "/etc/thanos/rules/thanos-ruler-thanos-ruler-rulefiles-0/test-custom-alerting-rule-jqbgn.yaml",
			Rules: []*alerting.AlertingRule{{
				Name:     "TestCPUUsageHigh",
				Query:    `namespace:workload_cpu_usage:sum{namespace="test"} > 1`,
				Duration: 60,
				Health:   string(rules.HealthGood),
				State:    stateInactiveString,
				Annotations: map[string]string{
					"alias":       "The alias is here",
					"description": "The description is here",
				},
			}},
		}},
		expected: []*v2alpha1.GettableAlertingRule{{
			AlertingRule: v2alpha1.AlertingRule{
				Id:       "ca7f09e76954e67c",
				Name:     "TestCPUUsageHigh",
				Query:    `namespace:workload_cpu_usage:sum{namespace="test"} > 1`,
				Duration: "1m",
				Annotations: map[string]string{
					"alias":       "The alias is here",
					"description": "The description is here",
				},
			},
			Health: string(rules.HealthGood),
			State:  stateInactiveString,
		}},
	}}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			rules, err := GetAlertingRulesStatus(test.ruleNamespace, test.resourceRuleChunk, test.ruleGroups, test.extLabels)
			if err != nil {
				t.Fatal(err)
			}
			if diff := cmp.Diff(rules, test.expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
			}
		})
	}
}
||||
@@ -1,100 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auditing
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/auditing/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/auditing"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
Events(queryParam *v1alpha1.Query, MutateFilterFunc func(*auditing.Filter)) (*v1alpha1.APIResponse, error)
|
||||
}
|
||||
|
||||
type eventsOperator struct {
|
||||
client auditing.Client
|
||||
}
|
||||
|
||||
func NewEventsOperator(client auditing.Client) Interface {
|
||||
return &eventsOperator{client}
|
||||
}
|
||||
|
||||
func (eo *eventsOperator) Events(queryParam *v1alpha1.Query,
|
||||
MutateFilterFunc func(*auditing.Filter)) (*v1alpha1.APIResponse, error) {
|
||||
filter := &auditing.Filter{
|
||||
ObjectRefNamespaces: stringutils.Split(queryParam.ObjectRefNamespaceFilter, ","),
|
||||
ObjectRefNamespaceFuzzy: stringutils.Split(queryParam.ObjectRefNamespaceSearch, ","),
|
||||
Workspaces: stringutils.Split(queryParam.WorkspaceFilter, ","),
|
||||
WorkspaceFuzzy: stringutils.Split(queryParam.WorkspaceSearch, ","),
|
||||
ObjectRefNames: stringutils.Split(queryParam.ObjectRefNameFilter, ","),
|
||||
ObjectRefNameFuzzy: stringutils.Split(queryParam.ObjectRefNameSearch, ","),
|
||||
Levels: stringutils.Split(queryParam.LevelFilter, ","),
|
||||
Verbs: stringutils.Split(queryParam.VerbFilter, ","),
|
||||
Users: stringutils.Split(queryParam.UserFilter, ","),
|
||||
UserFuzzy: stringutils.Split(queryParam.UserSearch, ","),
|
||||
GroupFuzzy: stringutils.Split(queryParam.GroupSearch, ","),
|
||||
SourceIpFuzzy: stringutils.Split(queryParam.SourceIpSearch, ","),
|
||||
ObjectRefResources: stringutils.Split(queryParam.ObjectRefResourceFilter, ","),
|
||||
ObjectRefSubresources: stringutils.Split(queryParam.ObjectRefSubresourceFilter, ","),
|
||||
ResponseStatus: stringutils.Split(queryParam.ResponseStatusFilter, ","),
|
||||
StartTime: queryParam.StartTime,
|
||||
EndTime: queryParam.EndTime,
|
||||
}
|
||||
if MutateFilterFunc != nil {
|
||||
MutateFilterFunc(filter)
|
||||
}
|
||||
|
||||
cs := stringutils.Split(queryParam.ResponseCodeFilter, ",")
|
||||
for _, c := range cs {
|
||||
code, err := strconv.ParseInt(c, 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
filter.ResponseCodes = append(filter.ResponseCodes, int32(code))
|
||||
}
|
||||
|
||||
var ar v1alpha1.APIResponse
|
||||
var err error
|
||||
switch queryParam.Operation {
|
||||
case "histogram":
|
||||
if len(filter.ObjectRefNamespaceMap) == 0 && len(filter.WorkspaceMap) == 0 {
|
||||
ar.Histogram = &auditing.Histogram{}
|
||||
} else {
|
||||
ar.Histogram, err = eo.client.CountOverTime(filter, queryParam.Interval)
|
||||
}
|
||||
case "statistics":
|
||||
if len(filter.ObjectRefNamespaceMap) == 0 && len(filter.WorkspaceMap) == 0 {
|
||||
ar.Statistics = &auditing.Statistics{}
|
||||
} else {
|
||||
ar.Statistics, err = eo.client.StatisticsOnResources(filter)
|
||||
}
|
||||
default:
|
||||
if len(filter.ObjectRefNamespaceMap) == 0 && len(filter.WorkspaceMap) == 0 {
|
||||
ar.Events = &auditing.Events{}
|
||||
} else {
|
||||
ar.Events, err = eo.client.SearchAuditingEvent(filter, queryParam.From, queryParam.Size, queryParam.Sort)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ar, nil
|
||||
}
|
||||
@@ -1,20 +1,7 @@
|
||||
/*
|
||||
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
@@ -22,21 +9,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/mail"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
authuser "k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/klog/v2"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -52,7 +31,7 @@ var (
|
||||
// "k8s.io/apimachinery/pkg/api", and restful.ServerError defined at package
|
||||
// "github.com/emicklei/go-restful/v3", or the server cannot handle error correctly.
|
||||
type PasswordAuthenticator interface {
|
||||
Authenticate(ctx context.Context, provider, username, password string) (authuser.Info, string, error)
|
||||
Authenticate(ctx context.Context, provider, username, password string) (authuser.Info, error)
|
||||
}
|
||||
|
||||
// OAuthAuthenticator authenticate users by OAuth 2.0 Authorization Framework. Note that implement this
|
||||
@@ -60,76 +39,32 @@ type PasswordAuthenticator interface {
|
||||
// "k8s.io/apimachinery/pkg/api", and restful.ServerError defined at package
|
||||
// "github.com/emicklei/go-restful/v3", or the server cannot handle error correctly.
|
||||
type OAuthAuthenticator interface {
|
||||
Authenticate(ctx context.Context, provider string, req *http.Request) (authuser.Info, string, error)
|
||||
Authenticate(ctx context.Context, provider string, req *http.Request) (authuser.Info, error)
|
||||
}
|
||||
|
||||
type userGetter struct {
|
||||
userLister iamv1alpha2listers.UserLister
|
||||
}
|
||||
|
||||
func preRegistrationUser(idp string, identity identityprovider.Identity) authuser.Info {
|
||||
func newRreRegistrationUser(idp string, identity identityprovider.Identity) authuser.Info {
|
||||
return &authuser.DefaultInfo{
|
||||
Name: iamv1alpha2.PreRegistrationUser,
|
||||
Name: iamv1beta1.PreRegistrationUser,
|
||||
Extra: map[string][]string{
|
||||
iamv1alpha2.ExtraIdentityProvider: {idp},
|
||||
iamv1alpha2.ExtraUID: {identity.GetUserID()},
|
||||
iamv1alpha2.ExtraUsername: {identity.GetUsername()},
|
||||
iamv1alpha2.ExtraEmail: {identity.GetEmail()},
|
||||
iamv1beta1.ExtraIdentityProvider: {idp},
|
||||
iamv1beta1.ExtraUID: {identity.GetUserID()},
|
||||
iamv1beta1.ExtraUsername: {identity.GetUsername()},
|
||||
iamv1beta1.ExtraEmail: {identity.GetEmail()},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mappedUser(idp string, identity identityprovider.Identity) *iamv1alpha2.User {
|
||||
func newMappedUser(idp string, identity identityprovider.Identity) *iamv1beta1.User {
|
||||
// username convert
|
||||
username := strings.ToLower(identity.GetUsername())
|
||||
return &iamv1alpha2.User{
|
||||
return &iamv1beta1.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: username,
|
||||
Labels: map[string]string{
|
||||
iamv1alpha2.IdentifyProviderLabel: idp,
|
||||
iamv1alpha2.OriginUIDLabel: identity.GetUserID(),
|
||||
iamv1beta1.IdentifyProviderLabel: idp,
|
||||
iamv1beta1.OriginUIDLabel: identity.GetUserID(),
|
||||
},
|
||||
},
|
||||
Spec: iamv1alpha2.UserSpec{Email: identity.GetEmail()},
|
||||
Spec: iamv1beta1.UserSpec{Email: identity.GetEmail()},
|
||||
}
|
||||
}
|
||||
|
||||
// findUser returns the user associated with the username or email
|
||||
func (u *userGetter) findUser(username string) (*iamv1alpha2.User, error) {
|
||||
if _, err := mail.ParseAddress(username); err != nil {
|
||||
return u.userLister.Get(username)
|
||||
}
|
||||
|
||||
users, err := u.userLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
if user.Spec.Email == username {
|
||||
return user, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.NewNotFound(iamv1alpha2.Resource("user"), username)
|
||||
}
|
||||
|
||||
// findMappedUser returns the user which mapped to the identity
|
||||
func (u *userGetter) findMappedUser(idp, uid string) (*iamv1alpha2.User, error) {
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
iamv1alpha2.IdentifyProviderLabel: idp,
|
||||
iamv1alpha2.OriginUIDLabel: uid,
|
||||
})
|
||||
|
||||
users, err := u.userLister.List(selector)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
if len(users) != 1 {
|
||||
return nil, errors.NewNotFound(iamv1alpha2.Resource("user"), uid)
|
||||
}
|
||||
|
||||
return users[0], err
|
||||
}
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
@@ -23,33 +12,30 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type LoginRecorder interface {
|
||||
RecordLogin(username string, loginType iamv1alpha2.LoginType, provider string, sourceIP string, userAgent string, authErr error) error
|
||||
RecordLogin(ctx context.Context, username string, loginType iamv1beta1.LoginType, provider string, sourceIP string, userAgent string, authErr error) error
|
||||
}
|
||||
|
||||
type loginRecorder struct {
|
||||
ksClient kubesphere.Interface
|
||||
userGetter *userGetter
|
||||
client runtimeclient.Client
|
||||
userMapper userMapper
|
||||
}
|
||||
|
||||
func NewLoginRecorder(ksClient kubesphere.Interface, userLister iamv1alpha2listers.UserLister) LoginRecorder {
|
||||
func NewLoginRecorder(cacheClient runtimeclient.Client) LoginRecorder {
|
||||
return &loginRecorder{
|
||||
ksClient: ksClient,
|
||||
userGetter: &userGetter{userLister: userLister},
|
||||
client: cacheClient,
|
||||
userMapper: userMapper{cache: cacheClient},
|
||||
}
|
||||
}
|
||||
|
||||
// RecordLogin Create v1alpha2.LoginRecord for existing accounts
|
||||
func (l *loginRecorder) RecordLogin(username string, loginType iamv1alpha2.LoginType, provider, sourceIP, userAgent string, authErr error) error {
|
||||
func (l *loginRecorder) RecordLogin(ctx context.Context, username string, loginType iamv1beta1.LoginType, provider, sourceIP, userAgent string, authErr error) error {
|
||||
// only for existing accounts, solve the problem of huge entries
|
||||
user, err := l.userGetter.findUser(username)
|
||||
user, err := l.userMapper.Find(ctx, username)
|
||||
if err != nil {
|
||||
// ignore not found error
|
||||
if errors.IsNotFound(err) {
|
||||
@@ -58,30 +44,29 @@ func (l *loginRecorder) RecordLogin(username string, loginType iamv1alpha2.Login
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
loginEntry := &iamv1alpha2.LoginRecord{
|
||||
record := &iamv1beta1.LoginRecord{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("%s-", user.Name),
|
||||
Labels: map[string]string{
|
||||
iamv1alpha2.UserReferenceLabel: user.Name,
|
||||
iamv1beta1.UserReferenceLabel: user.Name,
|
||||
},
|
||||
},
|
||||
Spec: iamv1alpha2.LoginRecordSpec{
|
||||
Spec: iamv1beta1.LoginRecordSpec{
|
||||
Type: loginType,
|
||||
Provider: provider,
|
||||
Success: true,
|
||||
Reason: iamv1alpha2.AuthenticatedSuccessfully,
|
||||
Reason: iamv1beta1.AuthenticatedSuccessfully,
|
||||
SourceIP: sourceIP,
|
||||
UserAgent: userAgent,
|
||||
},
|
||||
}
|
||||
|
||||
if authErr != nil {
|
||||
loginEntry.Spec.Success = false
|
||||
loginEntry.Spec.Reason = authErr.Error()
|
||||
record.Spec.Success = false
|
||||
record.Spec.Reason = authErr.Error()
|
||||
}
|
||||
|
||||
_, err = l.ksClient.IamV1alpha2().LoginRecords().Create(context.Background(), loginEntry, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
if err = l.client.Create(context.Background(), record); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
57
pkg/models/auth/mapper.go
Normal file
57
pkg/models/auth/mapper.go
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/mail"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type userMapper struct {
|
||||
cache runtimeclient.Reader
|
||||
}
|
||||
|
||||
// Find returns the user associated with the username or email
|
||||
func (u *userMapper) Find(ctx context.Context, username string) (*iamv1beta1.User, error) {
|
||||
user := &iamv1beta1.User{}
|
||||
if _, err := mail.ParseAddress(username); err != nil {
|
||||
return user, u.cache.Get(ctx, types.NamespacedName{Name: username}, user)
|
||||
}
|
||||
|
||||
// TODO cache with index
|
||||
userList := &iamv1beta1.UserList{}
|
||||
if err := u.cache.List(ctx, userList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, user := range userList.Items {
|
||||
if user.Spec.Email == username {
|
||||
return &user, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.NewNotFound(iamv1beta1.Resource("user"), username)
|
||||
}
|
||||
|
||||
// FindMappedUser returns the user which mapped to the identity
|
||||
func (u *userMapper) FindMappedUser(ctx context.Context, idp, uid string) (*iamv1beta1.User, error) {
|
||||
userList := &iamv1beta1.UserList{}
|
||||
if err := u.cache.List(ctx, userList, runtimeclient.MatchingLabels{
|
||||
iamv1beta1.IdentifyProviderLabel: idp,
|
||||
iamv1beta1.OriginUIDLabel: uid,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(userList.Items) != 1 {
|
||||
return nil, nil
|
||||
}
|
||||
return &userList.Items[0], nil
|
||||
}
|
||||
@@ -1,102 +1,84 @@
|
||||
/*
|
||||
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
authuser "k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/klog/v2"
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
)
|
||||
|
||||
type oauthAuthenticator struct {
|
||||
ksClient kubesphere.Interface
|
||||
userGetter *userGetter
|
||||
options *authentication.Options
|
||||
client runtimeclient.Client
|
||||
userGetter *userMapper
|
||||
idpConfigurationGetter identityprovider.ConfigurationGetter
|
||||
}
|
||||
|
||||
func NewOAuthAuthenticator(ksClient kubesphere.Interface,
|
||||
userLister iamv1alpha2listers.UserLister,
|
||||
options *authentication.Options) OAuthAuthenticator {
|
||||
func NewOAuthAuthenticator(cacheClient runtimeclient.Client) OAuthAuthenticator {
|
||||
authenticator := &oauthAuthenticator{
|
||||
ksClient: ksClient,
|
||||
userGetter: &userGetter{userLister: userLister},
|
||||
options: options,
|
||||
client: cacheClient,
|
||||
userGetter: &userMapper{cache: cacheClient},
|
||||
idpConfigurationGetter: identityprovider.NewConfigurationGetter(cacheClient),
|
||||
}
|
||||
return authenticator
|
||||
}
|
||||
|
||||
func (o *oauthAuthenticator) Authenticate(_ context.Context, provider string, req *http.Request) (authuser.Info, string, error) {
|
||||
providerOptions, err := o.options.OAuthOptions.IdentityProviderOptions(provider)
|
||||
func (o *oauthAuthenticator) Authenticate(ctx context.Context, provider string, req *http.Request) (authuser.Info, error) {
|
||||
providerConfig, err := o.idpConfigurationGetter.GetConfiguration(ctx, provider)
|
||||
// identity provider not registered
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
return nil, fmt.Errorf("failed to get identity provider configuration for %s, error: %v", provider, err)
|
||||
}
|
||||
oauthIdentityProvider, err := identityprovider.GetOAuthProvider(providerOptions.Name)
|
||||
|
||||
oauthIdentityProvider, exist := identityprovider.SharedIdentityProviderController.GetOAuthProvider(provider)
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("identity provider %s not exist", provider)
|
||||
}
|
||||
|
||||
identity, err := oauthIdentityProvider.IdentityExchangeCallback(req)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
return nil, fmt.Errorf("failed to exchange identity for %s, error: %v", provider, err)
|
||||
}
|
||||
authenticated, err := oauthIdentityProvider.IdentityExchangeCallback(req)
|
||||
|
||||
mappedUser, err := o.userGetter.FindMappedUser(ctx, providerConfig.Name, identity.GetUserID())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
return nil, fmt.Errorf("failed to find mapped user for %s, error: %v", provider, err)
|
||||
}
|
||||
|
||||
user, err := o.userGetter.findMappedUser(providerOptions.Name, authenticated.GetUserID())
|
||||
if user == nil && providerOptions.MappingMethod == oauth.MappingMethodLookup {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
// the user will automatically create and mapping when login successful.
|
||||
if user == nil && providerOptions.MappingMethod == oauth.MappingMethodAuto {
|
||||
if !providerOptions.DisableLoginConfirmation {
|
||||
return preRegistrationUser(providerOptions.Name, authenticated), providerOptions.Name, nil
|
||||
if mappedUser == nil {
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodLookup {
|
||||
return nil, fmt.Errorf("failed to find mapped user: %s", identity.GetUserID())
|
||||
}
|
||||
user, err = o.ksClient.IamV1alpha2().Users().Create(context.Background(), mappedUser(providerOptions.Name, authenticated), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, providerOptions.Name, err
|
||||
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodManual {
|
||||
return newRreRegistrationUser(providerConfig.Name, identity), nil
|
||||
}
|
||||
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodAuto {
|
||||
mappedUser = newMappedUser(providerConfig.Name, identity)
|
||||
|
||||
if err = o.client.Create(ctx, mappedUser); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &authuser.DefaultInfo{Name: mappedUser.GetName()}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid mapping method found %s", providerConfig.MappingMethod)
|
||||
}
|
||||
|
||||
if user != nil {
|
||||
if user.Status.State == iamv1alpha2.UserDisabled {
|
||||
// state not active
|
||||
return nil, "", AccountIsNotActiveError
|
||||
}
|
||||
return &authuser.DefaultInfo{Name: user.GetName()}, providerOptions.Name, nil
|
||||
if mappedUser.Status.State == iamv1beta1.UserDisabled {
|
||||
return nil, AccountIsNotActiveError
|
||||
}
|
||||
|
||||
return nil, "", errors.NewNotFound(iamv1alpha2.Resource("user"), authenticated.GetUsername())
|
||||
return &authuser.DefaultInfo{Name: mappedUser.GetName()}, nil
|
||||
}
|
||||
|
||||
@@ -1,18 +1,8 @@
|
||||
/*
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package auth
|
||||
|
||||
import (
|
||||
@@ -22,66 +12,90 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/options"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"gopkg.in/yaml.v3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
runtimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/scheme"
|
||||
"kubesphere.io/kubesphere/pkg/server/options"
|
||||
)
|
||||
|
||||
func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
|
||||
oauthOptions := &authentication.Options{
|
||||
OAuthOptions: &oauth.Options{
|
||||
IdentityProviders: []oauth.IdentityProviderOptions{
|
||||
{
|
||||
Name: "fake",
|
||||
MappingMethod: "auto",
|
||||
Type: "FakeIdentityProvider",
|
||||
Provider: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"code1": map[string]string{
|
||||
"uid": "100001",
|
||||
"email": "user1@kubesphere.io",
|
||||
"username": "user1",
|
||||
},
|
||||
"code2": map[string]string{
|
||||
"uid": "100002",
|
||||
"email": "user2@kubesphere.io",
|
||||
"username": "user2",
|
||||
},
|
||||
},
|
||||
},
|
||||
fakeIDP := &identityprovider.Configuration{
|
||||
Name: "fake",
|
||||
MappingMethod: "auto",
|
||||
Type: "FakeOAuthProvider",
|
||||
ProviderOptions: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"code1": map[string]string{
|
||||
"uid": "100001",
|
||||
"email": "user1@kubesphere.io",
|
||||
"username": "user1",
|
||||
},
|
||||
"code2": map[string]string{
|
||||
"uid": "100002",
|
||||
"email": "user2@kubesphere.io",
|
||||
"username": "user2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
identityprovider.RegisterOAuthProvider(&fakeProviderFactory{})
|
||||
if err := identityprovider.SetupWithOptions(oauthOptions.OAuthOptions.IdentityProviders); err != nil {
|
||||
marshal, err := yaml.Marshal(fakeIDP)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-fake-idp",
|
||||
Namespace: "kubesphere-system",
|
||||
Labels: map[string]string{
|
||||
constants.GenericConfigTypeLabel: identityprovider.ConfigTypeIdentityProvider,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"configuration.yaml": marshal,
|
||||
},
|
||||
Type: identityprovider.SecretTypeIdentityProvider,
|
||||
}
|
||||
|
||||
fakeCache := informertest.FakeInformers{Scheme: scheme.Scheme}
|
||||
err = fakeCache.Start(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeSecretInformer, err := fakeCache.FakeInformerFor(context.Background(), &v1.Secret{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ksClient := fakeks.NewSimpleClientset()
|
||||
ksInformerFactory := ksinformers.NewSharedInformerFactory(ksClient, 0)
|
||||
|
||||
if err := ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newUser("user1", "100001", "fake")); err != nil {
|
||||
identityprovider.RegisterOAuthProviderFactory(&fakeProviderFactory{})
|
||||
identityprovider.SharedIdentityProviderController = identityprovider.NewController()
|
||||
err = identityprovider.SharedIdentityProviderController.WatchConfigurationChanges(context.Background(), &fakeCache)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fakeSecretInformer.Add(secret)
|
||||
|
||||
blockedUser := newUser("user2", "100002", "fake")
|
||||
blockedUser.Status = iamv1alpha2.UserStatus{State: iamv1alpha2.UserDisabled}
|
||||
if err := ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(blockedUser); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockedUser.Status = iamv1beta1.UserStatus{State: iamv1beta1.UserDisabled}
|
||||
|
||||
client := runtimefakeclient.NewClientBuilder().
|
||||
WithScheme(scheme.Scheme).
|
||||
WithRuntimeObjects(newUser("user1", "100001", "fake"), secret, blockedUser).
|
||||
Build()
|
||||
|
||||
type args struct {
|
||||
ctx context.Context
|
||||
@@ -97,12 +111,8 @@ func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Should successfully",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(
|
||||
nil,
|
||||
ksInformerFactory.Iam().V1alpha2().Users().Lister(),
|
||||
oauthOptions,
|
||||
),
|
||||
name: "Should successfully",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(client),
|
||||
args: args{
|
||||
ctx: context.Background(),
|
||||
provider: "fake",
|
||||
@@ -115,12 +125,8 @@ func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Blocked user test",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(
|
||||
nil,
|
||||
ksInformerFactory.Iam().V1alpha2().Users().Lister(),
|
||||
oauthOptions,
|
||||
),
|
||||
name: "Blocked user test",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(client),
|
||||
args: args{
|
||||
ctx: context.Background(),
|
||||
provider: "fake",
|
||||
@@ -131,12 +137,8 @@ func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Should successfully",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(
|
||||
nil,
|
||||
ksInformerFactory.Iam().V1alpha2().Users().Lister(),
|
||||
oauthOptions,
|
||||
),
|
||||
name: "Should successfully",
|
||||
oauthAuthenticator: NewOAuthAuthenticator(client),
|
||||
args: args{
|
||||
ctx: context.Background(),
|
||||
provider: "fake1",
|
||||
@@ -148,7 +150,7 @@ func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
userInfo, provider, err := tt.oauthAuthenticator.Authenticate(tt.args.ctx, tt.args.provider, tt.args.req)
|
||||
userInfo, err := tt.oauthAuthenticator.Authenticate(tt.args.ctx, tt.args.provider, tt.args.req)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Authenticate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
@@ -156,9 +158,6 @@ func Test_oauthAuthenticator_Authenticate(t *testing.T) {
|
||||
if !reflect.DeepEqual(userInfo, tt.userInfo) {
|
||||
t.Errorf("Authenticate() got = %v, want %v", userInfo, tt.userInfo)
|
||||
}
|
||||
if provider != tt.provider {
|
||||
t.Errorf("Authenticate() got = %v, want %v", provider, tt.provider)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -170,17 +169,17 @@ func must(r *http.Request, err error) *http.Request {
|
||||
return r
|
||||
}
|
||||
|
||||
func newUser(username string, uid string, idp string) *iamv1alpha2.User {
|
||||
return &iamv1alpha2.User{
|
||||
func newUser(username string, uid string, idp string) *iamv1beta1.User {
|
||||
return &iamv1beta1.User{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: iamv1alpha2.ResourceKindUser,
|
||||
APIVersion: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
Kind: iamv1beta1.ResourceKindUser,
|
||||
APIVersion: iamv1beta1.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: username,
|
||||
Labels: map[string]string{
|
||||
iamv1alpha2.IdentifyProviderLabel: idp,
|
||||
iamv1alpha2.OriginUIDLabel: uid,
|
||||
iamv1beta1.IdentifyProviderLabel: idp,
|
||||
iamv1beta1.OriginUIDLabel: uid,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -212,7 +211,7 @@ func (f fakeIdentity) GetEmail() string {
|
||||
}
|
||||
|
||||
func (fakeProviderFactory) Type() string {
|
||||
return "FakeIdentityProvider"
|
||||
return "FakeOAuthProvider"
|
||||
}
|
||||
|
||||
func (fakeProviderFactory) Create(dynamicOptions options.DynamicOptions) (identityprovider.OAuthProvider, error) {
|
||||
|
||||
@@ -1,159 +1,151 @@
|
||||
/*
|
||||
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
authuser "k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
iamv1alpha2listers "kubesphere.io/kubesphere/pkg/client/listers/iam/v1alpha2"
|
||||
)
|
||||
|
||||
type passwordAuthenticator struct {
|
||||
ksClient kubesphere.Interface
|
||||
userGetter *userGetter
|
||||
authOptions *authentication.Options
|
||||
userGetter *userMapper
|
||||
client runtimeclient.Client
|
||||
authOptions *authentication.Options
|
||||
identityProviderConfigurationGetter identityprovider.ConfigurationGetter
|
||||
}
|
||||
|
||||
func NewPasswordAuthenticator(ksClient kubesphere.Interface,
|
||||
userLister iamv1alpha2listers.UserLister,
|
||||
options *authentication.Options) PasswordAuthenticator {
|
||||
func NewPasswordAuthenticator(cacheClient runtimeclient.Client, options *authentication.Options) PasswordAuthenticator {
|
||||
passwordAuthenticator := &passwordAuthenticator{
|
||||
ksClient: ksClient,
|
||||
userGetter: &userGetter{userLister: userLister},
|
||||
authOptions: options,
|
||||
client: cacheClient,
|
||||
userGetter: &userMapper{cache: cacheClient},
|
||||
identityProviderConfigurationGetter: identityprovider.NewConfigurationGetter(cacheClient),
|
||||
authOptions: options,
|
||||
}
|
||||
return passwordAuthenticator
|
||||
}
|
||||
|
||||
func (p *passwordAuthenticator) Authenticate(_ context.Context, provider, username, password string) (authuser.Info, string, error) {
|
||||
func (p *passwordAuthenticator) Authenticate(ctx context.Context, provider, username, password string) (authuser.Info, error) {
|
||||
// empty username or password are not allowed
|
||||
if username == "" || password == "" {
|
||||
return nil, "", IncorrectPasswordError
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
if provider != "" {
|
||||
return p.authByProvider(provider, username, password)
|
||||
return p.authByProvider(ctx, provider, username, password)
|
||||
}
|
||||
return p.authByKubeSphere(username, password)
|
||||
return p.authByKubeSphere(ctx, username, password)
|
||||
}
|
||||
|
||||
// authByKubeSphere authenticate by the kubesphere user
|
||||
func (p *passwordAuthenticator) authByKubeSphere(username, password string) (authuser.Info, string, error) {
|
||||
user, err := p.userGetter.findUser(username)
|
||||
func (p *passwordAuthenticator) authByKubeSphere(ctx context.Context, username, password string) (authuser.Info, error) {
|
||||
user, err := p.userGetter.Find(ctx, username)
|
||||
if err != nil {
|
||||
// ignore not found error
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
if errors.IsNotFound(err) {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
return nil, fmt.Errorf("failed to find user: %s", err)
|
||||
}
|
||||
|
||||
if user == nil {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
|
||||
// check user status
|
||||
if user != nil && user.Status.State != iamv1alpha2.UserActive {
|
||||
if user.Status.State == iamv1alpha2.UserAuthLimitExceeded {
|
||||
klog.Errorf("%s, username: %s", RateLimitExceededError, username)
|
||||
return nil, "", RateLimitExceededError
|
||||
if user.Status.State != iamv1beta1.UserActive {
|
||||
if user.Status.State == iamv1beta1.UserAuthLimitExceeded {
|
||||
return nil, RateLimitExceededError
|
||||
} else {
|
||||
// state not active
|
||||
klog.Errorf("%s, username: %s", AccountIsNotActiveError, username)
|
||||
return nil, "", AccountIsNotActiveError
|
||||
return nil, AccountIsNotActiveError
|
||||
}
|
||||
}
|
||||
|
||||
// if the password is not empty, means that the password has been reset, even if the user was mapping from IDP
|
||||
if user != nil && user.Spec.EncryptedPassword != "" {
|
||||
if err = PasswordVerify(user.Spec.EncryptedPassword, password); err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
}
|
||||
u := &authuser.DefaultInfo{
|
||||
Name: user.Name,
|
||||
Groups: user.Spec.Groups,
|
||||
}
|
||||
// check if the password is initialized
|
||||
if uninitialized := user.Annotations[iamv1alpha2.UninitializedAnnotation]; uninitialized != "" {
|
||||
u.Extra = map[string][]string{
|
||||
iamv1alpha2.ExtraUninitialized: {uninitialized},
|
||||
}
|
||||
}
|
||||
return u, "", nil
|
||||
if user.Spec.EncryptedPassword == "" {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
|
||||
return nil, "", IncorrectPasswordError
|
||||
if err = PasswordVerify(user.Spec.EncryptedPassword, password); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := &authuser.DefaultInfo{
|
||||
Name: user.Name,
|
||||
Groups: user.Spec.Groups,
|
||||
}
|
||||
|
||||
// check if the password is initialized
|
||||
if uninitialized := user.Annotations[iamv1beta1.UninitializedAnnotation]; uninitialized != "" {
|
||||
info.Extra = map[string][]string{
|
||||
iamv1beta1.ExtraUninitialized: {uninitialized},
|
||||
}
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// authByProvider authenticate by the third-party identity provider user
|
||||
func (p *passwordAuthenticator) authByProvider(provider, username, password string) (authuser.Info, string, error) {
|
||||
providerOptions, err := p.authOptions.OAuthOptions.IdentityProviderOptions(provider)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
func (p *passwordAuthenticator) authByProvider(ctx context.Context, provider, username, password string) (authuser.Info, error) {
|
||||
genericProvider, exist := identityprovider.SharedIdentityProviderController.GetGenericProvider(provider)
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("generic identity provider %s not found", provider)
|
||||
}
|
||||
genericProvider, err := identityprovider.GetGenericProvider(providerOptions.Name)
|
||||
|
||||
providerConfig, err := p.identityProviderConfigurationGetter.GetConfiguration(ctx, provider)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
return nil, fmt.Errorf("failed to get identity provider configuration: %s", err)
|
||||
}
|
||||
authenticated, err := genericProvider.Authenticate(username, password)
|
||||
|
||||
identity, err := genericProvider.Authenticate(username, password)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if errors.IsUnauthorized(err) {
|
||||
return nil, "", IncorrectPasswordError
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
return nil, "", err
|
||||
}
|
||||
linkedAccount, err := p.userGetter.findMappedUser(providerOptions.Name, authenticated.GetUserID())
|
||||
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if linkedAccount != nil {
|
||||
return &authuser.DefaultInfo{Name: linkedAccount.Name}, provider, nil
|
||||
mappedUser, err := p.userGetter.FindMappedUser(ctx, provider, identity.GetUserID())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find mapped user: %s", err)
|
||||
}
|
||||
|
||||
// the user will automatically create and mapping when login successful.
|
||||
if providerOptions.MappingMethod == oauth.MappingMethodAuto {
|
||||
if !providerOptions.DisableLoginConfirmation {
|
||||
return preRegistrationUser(providerOptions.Name, authenticated), providerOptions.Name, nil
|
||||
if mappedUser == nil {
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodLookup {
|
||||
return nil, fmt.Errorf("failed to find mapped user: %s", identity.GetUserID())
|
||||
}
|
||||
linkedAccount, err = p.ksClient.IamV1alpha2().Users().Create(context.Background(), mappedUser(providerOptions.Name, authenticated), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, "", err
|
||||
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodManual {
|
||||
return newRreRegistrationUser(providerConfig.Name, identity), nil
|
||||
}
|
||||
return &authuser.DefaultInfo{Name: linkedAccount.Name}, provider, nil
|
||||
|
||||
if providerConfig.MappingMethod == identityprovider.MappingMethodAuto {
|
||||
mappedUser = newMappedUser(providerConfig.Name, identity)
|
||||
|
||||
if err = p.client.Create(ctx, mappedUser); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &authuser.DefaultInfo{Name: mappedUser.GetName()}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid mapping method found %s", providerConfig.MappingMethod)
|
||||
}
|
||||
|
||||
return nil, "", err
|
||||
if mappedUser.Status.State == iamv1beta1.UserDisabled {
|
||||
return nil, AccountIsNotActiveError
|
||||
}
|
||||
|
||||
return &authuser.DefaultInfo{Name: mappedUser.GetName()}, nil
|
||||
}
|
||||
|
||||
func PasswordVerify(encryptedPassword, password string) error {
|
||||
|
||||
@@ -1,20 +1,7 @@
|
||||
/*
|
||||
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
@@ -23,20 +10,30 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
runtimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/scheme"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/options"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
authuser "k8s.io/apiserver/pkg/authentication/user"
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
)
|
||||
|
||||
func TestEncryptPassword(t *testing.T) {
|
||||
@@ -56,87 +53,154 @@ func hashPassword(password string) (string, error) {
|
||||
}
|
||||
|
||||
func Test_passwordAuthenticator_Authenticate(t *testing.T) {
|
||||
|
||||
identityprovider.RegisterGenericProviderFactory(&fakePasswordProviderFactory{})
|
||||
oauthOptions := &authentication.Options{
|
||||
OAuthOptions: &oauth.Options{
|
||||
IdentityProviders: []oauth.IdentityProviderOptions{
|
||||
{
|
||||
Name: "fakepwd",
|
||||
MappingMethod: "auto",
|
||||
Type: "fakePasswordProvider",
|
||||
Provider: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user1": map[string]string{
|
||||
"uid": "100001",
|
||||
"email": "user1@kubesphere.io",
|
||||
"username": "user1",
|
||||
"password": "password",
|
||||
},
|
||||
"user2": map[string]string{
|
||||
"uid": "100002",
|
||||
"email": "user2@kubesphere.io",
|
||||
"username": "user2",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
Issuer: &oauth.IssuerOptions{},
|
||||
}
|
||||
|
||||
fakepwd1 := &identityprovider.Configuration{
|
||||
Name: "fakepwd1",
|
||||
MappingMethod: "manual",
|
||||
Type: "fakePasswordProvider",
|
||||
ProviderOptions: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user1": map[string]string{
|
||||
"uid": "100001",
|
||||
"email": "user1@kubesphere.io",
|
||||
"username": "user1",
|
||||
"password": "password",
|
||||
},
|
||||
{
|
||||
Name: "fakepwd2",
|
||||
MappingMethod: "auto",
|
||||
Type: "fakePasswordProvider",
|
||||
DisableLoginConfirmation: true,
|
||||
Provider: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user5": map[string]string{
|
||||
"uid": "100005",
|
||||
"email": "user5@kubesphere.io",
|
||||
"username": "user5",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "fakepwd3",
|
||||
MappingMethod: "lookup",
|
||||
Type: "fakePasswordProvider",
|
||||
DisableLoginConfirmation: true,
|
||||
Provider: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user6": map[string]string{
|
||||
"uid": "100006",
|
||||
"email": "user6@kubesphere.io",
|
||||
"username": "user6",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
"user2": map[string]string{
|
||||
"uid": "100002",
|
||||
"email": "user2@kubesphere.io",
|
||||
"username": "user2",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
identityprovider.RegisterGenericProvider(&fakePasswordProviderFactory{})
|
||||
if err := identityprovider.SetupWithOptions(oauthOptions.OAuthOptions.IdentityProviders); err != nil {
|
||||
t.Fatal(err)
|
||||
fakepwd2 := &identityprovider.Configuration{
|
||||
Name: "fakepwd2",
|
||||
MappingMethod: "auto",
|
||||
Type: "fakePasswordProvider",
|
||||
ProviderOptions: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user5": map[string]string{
|
||||
"uid": "100005",
|
||||
"email": "user5@kubesphere.io",
|
||||
"username": "user5",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ksClient := fakeks.NewSimpleClientset()
|
||||
ksInformerFactory := ksinformers.NewSharedInformerFactory(ksClient, 0)
|
||||
err := ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newUser("user1", "100001", "fakepwd"))
|
||||
_ = ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newUser("user3", "100003", ""))
|
||||
_ = ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newActiveUser("user4", "password"))
|
||||
fakepwd3 := &identityprovider.Configuration{
|
||||
Name: "fakepwd3",
|
||||
MappingMethod: "lookup",
|
||||
Type: "fakePasswordProvider",
|
||||
ProviderOptions: options.DynamicOptions{
|
||||
"identities": map[string]interface{}{
|
||||
"user6": map[string]string{
|
||||
"uid": "100006",
|
||||
"email": "user6@kubesphere.io",
|
||||
"username": "user6",
|
||||
"password": "password",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
marshal1, err := yaml.Marshal(fakepwd1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fakepwd1Secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-fake-idp",
|
||||
Namespace: "kubesphere-system",
|
||||
Labels: map[string]string{
|
||||
constants.GenericConfigTypeLabel: identityprovider.ConfigTypeIdentityProvider,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"configuration.yaml": marshal1,
|
||||
},
|
||||
Type: identityprovider.SecretTypeIdentityProvider,
|
||||
}
|
||||
|
||||
marshal2, err := yaml.Marshal(fakepwd2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fakepwd2Secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-fake-idp2",
|
||||
Namespace: "kubesphere-system",
|
||||
Labels: map[string]string{
|
||||
constants.GenericConfigTypeLabel: identityprovider.ConfigTypeIdentityProvider,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"configuration.yaml": marshal2,
|
||||
},
|
||||
Type: identityprovider.SecretTypeIdentityProvider,
|
||||
}
|
||||
|
||||
marshal3, err := yaml.Marshal(fakepwd3)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
fakepwd3Secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-fake-idp3",
|
||||
Namespace: "kubesphere-system",
|
||||
Labels: map[string]string{
|
||||
constants.GenericConfigTypeLabel: identityprovider.ConfigTypeIdentityProvider,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"configuration.yaml": marshal3,
|
||||
},
|
||||
Type: identityprovider.SecretTypeIdentityProvider,
|
||||
}
|
||||
|
||||
client := runtimefakeclient.NewClientBuilder().
|
||||
WithScheme(scheme.Scheme).
|
||||
WithRuntimeObjects(
|
||||
newUser("user1", "100001", "fakepwd1"),
|
||||
newUser("user3", "100003", ""),
|
||||
newActiveUser("user4", "password"),
|
||||
fakepwd1Secret,
|
||||
fakepwd2Secret,
|
||||
fakepwd3Secret,
|
||||
).
|
||||
Build()
|
||||
|
||||
fakeCache := informertest.FakeInformers{Scheme: scheme.Scheme}
|
||||
err = fakeCache.Start(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeSecretInformer, err := fakeCache.FakeInformerFor(context.Background(), &v1.Secret{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
authenticator := NewPasswordAuthenticator(
|
||||
ksClient,
|
||||
ksInformerFactory.Iam().V1alpha2().Users().Lister(),
|
||||
oauthOptions,
|
||||
)
|
||||
identityprovider.RegisterOAuthProviderFactory(&fakeProviderFactory{})
|
||||
identityprovider.SharedIdentityProviderController = identityprovider.NewController()
|
||||
err = identityprovider.SharedIdentityProviderController.WatchConfigurationChanges(context.Background(), &fakeCache)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fakeSecretInformer.Add(fakepwd1Secret)
|
||||
fakeSecretInformer.Add(fakepwd2Secret)
|
||||
fakeSecretInformer.Add(fakepwd3Secret)
|
||||
|
||||
authenticator := NewPasswordAuthenticator(client, oauthOptions)
|
||||
|
||||
type args struct {
|
||||
ctx context.Context
|
||||
@@ -159,7 +223,7 @@ func Test_passwordAuthenticator_Authenticate(t *testing.T) {
|
||||
ctx: context.Background(),
|
||||
username: "user1",
|
||||
password: "password",
|
||||
provider: "fakepwd",
|
||||
provider: "fakepwd1",
|
||||
},
|
||||
want: &user.DefaultInfo{
|
||||
Name: "user1",
|
||||
@@ -173,13 +237,13 @@ func Test_passwordAuthenticator_Authenticate(t *testing.T) {
|
||||
ctx: context.Background(),
|
||||
username: "user2",
|
||||
password: "password",
|
||||
provider: "fakepwd",
|
||||
provider: "fakepwd1",
|
||||
},
|
||||
want: &user.DefaultInfo{
|
||||
Name: "system:pre-registration",
|
||||
Extra: map[string][]string{
|
||||
"email": {"user2@kubesphere.io"},
|
||||
"idp": {"fakepwd"},
|
||||
"idp": {"fakepwd1"},
|
||||
"uid": {"100002"},
|
||||
"username": {"user2"},
|
||||
},
|
||||
@@ -236,7 +300,7 @@ func Test_passwordAuthenticator_Authenticate(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := tt.passwordAuthenticator
|
||||
got, _, err := p.Authenticate(tt.args.ctx, tt.args.provider, tt.args.username, tt.args.password)
|
||||
got, err := p.Authenticate(tt.args.ctx, tt.args.provider, tt.args.username, tt.args.password)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("passwordAuthenticator.Authenticate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
@@ -298,10 +362,10 @@ func encrypt(password string) (string, error) {
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
func newActiveUser(username string, password string) *iamv1alpha2.User {
|
||||
func newActiveUser(username string, password string) *iamv1beta1.User {
|
||||
u := newUser(username, "", "")
|
||||
password, _ = encrypt(password)
|
||||
u.Spec.EncryptedPassword = password
|
||||
u.Status.State = iamv1alpha2.UserActive
|
||||
u.Status.State = iamv1beta1.UserActive
|
||||
return u
|
||||
}
|
||||
|
||||
@@ -1,20 +1,7 @@
|
||||
/*
|
||||
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
@@ -51,27 +38,30 @@ type tokenOperator struct {
|
||||
cache cache.Interface
|
||||
}
|
||||
|
||||
func (t tokenOperator) Revoke(token string) error {
|
||||
func (t *tokenOperator) Revoke(token string) error {
|
||||
pattern := fmt.Sprintf("kubesphere:user:*:token:%s", token)
|
||||
if keys, err := t.cache.Keys(pattern); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
} else if len(keys) > 0 {
|
||||
if err := t.cache.Del(keys...); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewTokenOperator(cache cache.Interface, issuer token.Issuer, options *authentication.Options) TokenManagementInterface {
|
||||
func NewTokenOperator(cache cache.Interface, options *authentication.Options) (TokenManagementInterface, error) {
|
||||
issuer, err := token.NewIssuer(options.Issuer)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to create token issuer: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
operator := &tokenOperator{
|
||||
issuer: issuer,
|
||||
options: options,
|
||||
cache: cache,
|
||||
}
|
||||
return operator
|
||||
return operator, nil
|
||||
}
|
||||
|
||||
func (t *tokenOperator) Verify(tokenStr string) (*token.VerifiedResponse, error) {
|
||||
@@ -79,7 +69,7 @@ func (t *tokenOperator) Verify(tokenStr string) (*token.VerifiedResponse, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t.options.OAuthOptions.AccessTokenMaxAge == 0 ||
|
||||
if t.options.Issuer.AccessTokenMaxAge == 0 ||
|
||||
response.TokenType == token.StaticToken {
|
||||
return response, nil
|
||||
}
|
||||
@@ -92,12 +82,10 @@ func (t *tokenOperator) Verify(tokenStr string) (*token.VerifiedResponse, error)
|
||||
func (t *tokenOperator) IssueTo(request *token.IssueRequest) (string, error) {
|
||||
tokenStr, err := t.issuer.IssueTo(request)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return "", err
|
||||
}
|
||||
if request.ExpiresIn > 0 {
|
||||
if err = t.cacheToken(request.User.GetName(), tokenStr, request.ExpiresIn); err != nil {
|
||||
klog.Error(err)
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
@@ -108,11 +96,9 @@ func (t *tokenOperator) IssueTo(request *token.IssueRequest) (string, error) {
|
||||
func (t *tokenOperator) RevokeAllUserTokens(username string) error {
|
||||
pattern := fmt.Sprintf("kubesphere:user:%s:token:*", username)
|
||||
if keys, err := t.cache.Keys(pattern); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
} else if len(keys) > 0 {
|
||||
if err := t.cache.Del(keys...); err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,59 +1,50 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package components
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/resource/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
type ComponentsGetter interface {
|
||||
type Getter interface {
|
||||
GetComponentStatus(name string) (v1alpha2.ComponentStatus, error)
|
||||
GetSystemHealthStatus() (v1alpha2.HealthStatus, error)
|
||||
GetAllComponentsStatus() ([]v1alpha2.ComponentStatus, error)
|
||||
}
|
||||
|
||||
type componentsGetter struct {
|
||||
informers informers.SharedInformerFactory
|
||||
cache runtimeclient.Reader
|
||||
}
|
||||
|
||||
func NewComponentsGetter(informers informers.SharedInformerFactory) ComponentsGetter {
|
||||
return &componentsGetter{informers: informers}
|
||||
func NewComponentsGetter(cache runtimeclient.Reader) Getter {
|
||||
return &componentsGetter{cache: cache}
|
||||
}
|
||||
|
||||
func (c *componentsGetter) GetComponentStatus(name string) (v1alpha2.ComponentStatus, error) {
|
||||
|
||||
var service *corev1.Service
|
||||
service := &corev1.Service{}
|
||||
var err error
|
||||
|
||||
for _, ns := range constants.SystemNamespaces {
|
||||
service, err = c.informers.Core().V1().Services().Lister().Services(ns).Get(name)
|
||||
if err == nil {
|
||||
if err := c.cache.Get(context.Background(), types.NamespacedName{
|
||||
Namespace: ns,
|
||||
Name: name,
|
||||
}, service); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return v1alpha2.ComponentStatus{}, err
|
||||
}
|
||||
@@ -62,9 +53,11 @@ func (c *componentsGetter) GetComponentStatus(name string) (v1alpha2.ComponentSt
|
||||
return v1alpha2.ComponentStatus{}, fmt.Errorf("component %s has no selector", name)
|
||||
}
|
||||
|
||||
pods, err := c.informers.Core().V1().Pods().Lister().Pods(service.Namespace).List(labels.SelectorFromValidatedSet(service.Spec.Selector))
|
||||
|
||||
if err != nil {
|
||||
pods := &corev1.PodList{}
|
||||
if err := c.cache.List(context.Background(), pods, &runtimeclient.ListOptions{
|
||||
LabelSelector: labels.SelectorFromValidatedSet(service.Spec.Selector),
|
||||
Namespace: service.Namespace,
|
||||
}); err != nil {
|
||||
return v1alpha2.ComponentStatus{}, err
|
||||
}
|
||||
|
||||
@@ -76,9 +69,9 @@ func (c *componentsGetter) GetComponentStatus(name string) (v1alpha2.ComponentSt
|
||||
HealthyBackends: 0,
|
||||
TotalBackends: 0,
|
||||
}
|
||||
for _, pod := range pods {
|
||||
for _, pod := range pods.Items {
|
||||
component.TotalBackends++
|
||||
if pod.Status.Phase == corev1.PodRunning && isAllContainersReady(pod) {
|
||||
if pod.Status.Phase == corev1.PodRunning && isAllContainersReady(&pod) {
|
||||
component.HealthyBackends++
|
||||
}
|
||||
}
|
||||
@@ -106,16 +99,16 @@ func (c *componentsGetter) GetSystemHealthStatus() (v1alpha2.HealthStatus, error
|
||||
|
||||
status.KubeSphereComponents = components
|
||||
|
||||
nodes := &corev1.NodeList{}
|
||||
// get node status
|
||||
nodes, err := c.informers.Core().V1().Nodes().Lister().List(labels.Everything())
|
||||
if err != nil {
|
||||
if err := c.cache.List(context.Background(), nodes); err != nil {
|
||||
klog.Errorln(err)
|
||||
return status, nil
|
||||
}
|
||||
|
||||
totalNodes := 0
|
||||
healthyNodes := 0
|
||||
for _, nodes := range nodes {
|
||||
for _, nodes := range nodes.Items {
|
||||
totalNodes++
|
||||
for _, condition := range nodes.Status.Conditions {
|
||||
if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
|
||||
@@ -138,15 +131,15 @@ func (c *componentsGetter) GetAllComponentsStatus() ([]v1alpha2.ComponentStatus,
|
||||
var err error
|
||||
for _, ns := range constants.SystemNamespaces {
|
||||
|
||||
services, err := c.informers.Core().V1().Services().Lister().Services(ns).List(labels.Everything())
|
||||
|
||||
if err != nil {
|
||||
services := &corev1.ServiceList{}
|
||||
if err := c.cache.List(context.Background(), services, &runtimeclient.ListOptions{
|
||||
Namespace: ns,
|
||||
}); err != nil {
|
||||
klog.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
|
||||
for _, service := range services.Items {
|
||||
// skip services without a selector
|
||||
if len(service.Spec.Selector) == 0 {
|
||||
continue
|
||||
@@ -161,16 +154,23 @@ func (c *componentsGetter) GetAllComponentsStatus() ([]v1alpha2.ComponentStatus,
|
||||
TotalBackends: 0,
|
||||
}
|
||||
|
||||
pods, err := c.informers.Core().V1().Pods().Lister().Pods(ns).List(labels.SelectorFromValidatedSet(service.Spec.Selector))
|
||||
pods := &corev1.PodList{}
|
||||
if err := c.cache.List(context.Background(), pods, &runtimeclient.ListOptions{
|
||||
LabelSelector: labels.SelectorFromValidatedSet(service.Spec.Selector),
|
||||
Namespace: ns,
|
||||
}); err != nil {
|
||||
klog.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
for _, pod := range pods.Items {
|
||||
component.TotalBackends++
|
||||
if pod.Status.Phase == corev1.PodRunning && isAllContainersReady(pod) {
|
||||
if pod.Status.Phase == corev1.PodRunning && isAllContainersReady(&pod) {
|
||||
component.HealthyBackends++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package components
|
||||
|
||||
@@ -21,12 +10,15 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
runtimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/scheme"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/resource/v1alpha2"
|
||||
)
|
||||
@@ -109,6 +101,10 @@ func nodes(name string, healthNodes, totalNodes int) []runtime.Object {
|
||||
return ns
|
||||
}
|
||||
|
||||
var _ = Describe("Components", func() {
|
||||
|
||||
})
|
||||
|
||||
func TestGetSystemHealthStatus(t *testing.T) {
|
||||
var tests = []struct {
|
||||
description string
|
||||
@@ -221,30 +217,17 @@ func TestGetSystemHealthStatus(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ps := pods(test.name, test.namespace, test.labels, test.healthPods, test.totalPods)
|
||||
pods := pods(test.name, test.namespace, test.labels, test.healthPods, test.totalPods)
|
||||
svc := service(test.name, test.namespace, test.labels)
|
||||
ns := nodes(test.name, test.healthNodes, test.totalNodes)
|
||||
nodes := nodes(test.name, test.healthNodes, test.totalNodes)
|
||||
|
||||
var objs []runtime.Object
|
||||
objs = append(objs, ps...)
|
||||
objs = append(objs, svc)
|
||||
objs = append(objs, ns...)
|
||||
client := runtimefakeclient.NewClientBuilder().
|
||||
WithScheme(scheme.Scheme).
|
||||
WithRuntimeObjects(pods...).
|
||||
WithRuntimeObjects(svc).
|
||||
WithRuntimeObjects(nodes...).Build()
|
||||
|
||||
client := fake.NewSimpleClientset(objs...)
|
||||
|
||||
informer := informers.NewSharedInformerFactory(client, time.Minute*10)
|
||||
|
||||
informer.Core().V1().Services().Informer().GetIndexer().Add(svc)
|
||||
|
||||
for _, obj := range ps {
|
||||
informer.Core().V1().Pods().Informer().GetIndexer().Add(obj)
|
||||
}
|
||||
|
||||
for _, obj := range ns {
|
||||
informer.Core().V1().Nodes().Informer().GetIndexer().Add(obj)
|
||||
}
|
||||
|
||||
c := NewComponentsGetter(informer)
|
||||
c := NewComponentsGetter(client)
|
||||
healthStatus, err := c.GetSystemHealthStatus()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -340,24 +323,15 @@ func TestGetComponentStatus(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ps := pods(test.name, test.namespace, test.labels, test.healthPods, test.totalPods)
|
||||
pods := pods(test.name, test.namespace, test.labels, test.healthPods, test.totalPods)
|
||||
svc := service(test.name, test.namespace, test.labels)
|
||||
|
||||
var objs []runtime.Object
|
||||
objs = append(objs, ps...)
|
||||
objs = append(objs, svc)
|
||||
client := runtimefakeclient.NewClientBuilder().
|
||||
WithScheme(scheme.Scheme).
|
||||
WithRuntimeObjects(pods...).
|
||||
WithRuntimeObjects(svc).Build()
|
||||
|
||||
client := fake.NewSimpleClientset(objs...)
|
||||
|
||||
informer := informers.NewSharedInformerFactory(client, time.Minute*10)
|
||||
|
||||
informer.Core().V1().Services().Informer().GetIndexer().Add(svc)
|
||||
|
||||
for _, obj := range ps {
|
||||
informer.Core().V1().Pods().Informer().GetIndexer().Add(obj)
|
||||
}
|
||||
|
||||
c := NewComponentsGetter(informer)
|
||||
c := NewComponentsGetter(client)
|
||||
healthStatus, err := c.GetComponentStatus(test.name)
|
||||
if err == nil && test.expectedError {
|
||||
t.Fatalf("expected error while got nothing")
|
||||
|
||||
38
pkg/models/composedapp/options.go
Normal file
38
pkg/models/composedapp/options.go
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package composedapp
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
// KubeSphere is using sigs.k8s.io/application as fundamental object to implement Application Management.
|
||||
// There are other projects also built on sigs.k8s.io/application, when KubeSphere installed along side
|
||||
// them, conflicts happen. So we leave an option to only reconcile applications matched with the given
|
||||
// selector. Default will reconcile all applications.
|
||||
// For example
|
||||
// "kubesphere.io/creator=" means reconcile applications with this label key
|
||||
// "!kubesphere.io/creator" means exclude applications with this key
|
||||
AppSelector string `json:"appSelector,omitempty" yaml:"appSelector,omitempty" mapstructure:"appSelector,omitempty"`
|
||||
}
|
||||
|
||||
func NewOptions() *Options {
|
||||
return &Options{}
|
||||
}
|
||||
|
||||
func (o *Options) Validate() []error {
|
||||
var err []error
|
||||
if _, validateErr := labels.Parse(o.AppSelector); validateErr != nil {
|
||||
err = append(err, validateErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
|
||||
fs.StringVar(&o.AppSelector, "app-selector", s.AppSelector, "Selector to filter k8s applications to reconcile")
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
approvers:
|
||||
- shaowenchen
|
||||
- linuxsuren
|
||||
|
||||
reviewers:
|
||||
- runzexia
|
||||
- soulseen
|
||||
- shaowenchen
|
||||
- linuxsuren
|
||||
|
||||
labels:
|
||||
- area/devops
|
||||
@@ -1,325 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
func GetColumnsFromStruct(s interface{}) []string {
|
||||
names := structs.Names(s)
|
||||
for i, name := range names {
|
||||
names[i] = stringutils.CamelCaseToUnderscore(name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func GetColumnsFromStructWithPrefix(prefix string, s interface{}) []string {
|
||||
names := structs.Names(s)
|
||||
for i, name := range names {
|
||||
names[i] = WithPrefix(prefix, stringutils.CamelCaseToUnderscore(name))
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func WithPrefix(prefix, str string) string {
|
||||
return prefix + "." + str
|
||||
}
|
||||
|
||||
const (
|
||||
StatusActive = "active"
|
||||
StatusDeleted = "deleted"
|
||||
StatusDeleting = "deleting"
|
||||
StatusFailed = "failed"
|
||||
StatusPending = "pending"
|
||||
StatusWorking = "working"
|
||||
StatusSuccessful = "successful"
|
||||
)
|
||||
|
||||
const (
|
||||
StatusColumn = "status"
|
||||
StatusTimeColumn = "status_time"
|
||||
)
|
||||
|
||||
const (
|
||||
VisibilityPrivate = "private"
|
||||
VisibilityPublic = "public"
|
||||
)
|
||||
|
||||
const (
|
||||
KS_ADMIN = "admin"
|
||||
)
|
||||
|
||||
// define roles of DevOps
|
||||
const (
|
||||
ProjectOwner = "owner"
|
||||
ProjectMaintainer = "maintainer"
|
||||
ProjectDeveloper = "developer"
|
||||
ProjectReporter = "reporter"
|
||||
)
|
||||
|
||||
const (
|
||||
JenkinsAllUserRoleName = "kubesphere-user"
|
||||
JenkinsAdminRoleName = "admin"
|
||||
)
|
||||
|
||||
type Role struct {
|
||||
Name string `json:"name" description:"role's name e.g. owner'"`
|
||||
Description string `json:"description" description:"role 's description'"`
|
||||
}
|
||||
|
||||
var DefaultRoles = []*Role{
|
||||
{
|
||||
Name: ProjectOwner,
|
||||
Description: "Owner have access to do all the operations of a DevOps project and own the highest permissions as well.",
|
||||
},
|
||||
{
|
||||
Name: ProjectMaintainer,
|
||||
Description: "Maintainer have access to manage pipeline and credential configuration in a DevOps project.",
|
||||
},
|
||||
{
|
||||
Name: ProjectDeveloper,
|
||||
Description: "Developer is able to view and trigger the pipeline.",
|
||||
},
|
||||
{
|
||||
Name: ProjectReporter,
|
||||
Description: "Reporter is only allowed to view the status of the pipeline.",
|
||||
},
|
||||
}
|
||||
|
||||
var AllRoleSlice = []string{ProjectDeveloper, ProjectReporter, ProjectMaintainer, ProjectOwner}
|
||||
|
||||
// define the permission matrix of owner
|
||||
var JenkinsOwnerProjectPermissionIds = &devops.ProjectPermissionIds{
|
||||
CredentialCreate: true,
|
||||
CredentialDelete: true,
|
||||
CredentialManageDomains: true,
|
||||
CredentialUpdate: true,
|
||||
CredentialView: true,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: true,
|
||||
ItemCreate: true,
|
||||
ItemDelete: true,
|
||||
ItemDiscover: true,
|
||||
ItemMove: true,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: true,
|
||||
}
|
||||
|
||||
// define the permission matrix of DevOps, including owner, maintainer, developer, reporter
|
||||
var JenkinsProjectPermissionMap = map[string]devops.ProjectPermissionIds{
|
||||
ProjectOwner: {
|
||||
CredentialCreate: true,
|
||||
CredentialDelete: true,
|
||||
CredentialManageDomains: true,
|
||||
CredentialUpdate: true,
|
||||
CredentialView: true,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: true,
|
||||
ItemCreate: true,
|
||||
ItemDelete: true,
|
||||
ItemDiscover: true,
|
||||
ItemMove: true,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: true,
|
||||
},
|
||||
ProjectMaintainer: {
|
||||
CredentialCreate: true,
|
||||
CredentialDelete: true,
|
||||
CredentialManageDomains: true,
|
||||
CredentialUpdate: true,
|
||||
CredentialView: true,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: false,
|
||||
ItemCreate: true,
|
||||
ItemDelete: false,
|
||||
ItemDiscover: true,
|
||||
ItemMove: false,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: true,
|
||||
},
|
||||
ProjectDeveloper: {
|
||||
CredentialCreate: false,
|
||||
CredentialDelete: false,
|
||||
CredentialManageDomains: false,
|
||||
CredentialUpdate: false,
|
||||
CredentialView: false,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: false,
|
||||
ItemCreate: false,
|
||||
ItemDelete: false,
|
||||
ItemDiscover: true,
|
||||
ItemMove: false,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: false,
|
||||
},
|
||||
ProjectReporter: {
|
||||
CredentialCreate: false,
|
||||
CredentialDelete: false,
|
||||
CredentialManageDomains: false,
|
||||
CredentialUpdate: false,
|
||||
CredentialView: false,
|
||||
ItemBuild: false,
|
||||
ItemCancel: false,
|
||||
ItemConfigure: false,
|
||||
ItemCreate: false,
|
||||
ItemDelete: false,
|
||||
ItemDiscover: true,
|
||||
ItemMove: false,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: false,
|
||||
RunDelete: false,
|
||||
RunReplay: false,
|
||||
RunUpdate: false,
|
||||
SCMTag: false,
|
||||
},
|
||||
}
|
||||
|
||||
// define the permission matrix of pipeline, including owner, maintainer, developer, reporter
|
||||
var JenkinsPipelinePermissionMap = map[string]devops.ProjectPermissionIds{
|
||||
ProjectOwner: {
|
||||
CredentialCreate: true,
|
||||
CredentialDelete: true,
|
||||
CredentialManageDomains: true,
|
||||
CredentialUpdate: true,
|
||||
CredentialView: true,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: true,
|
||||
ItemCreate: true,
|
||||
ItemDelete: true,
|
||||
ItemDiscover: true,
|
||||
ItemMove: true,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: true,
|
||||
},
|
||||
ProjectMaintainer: {
|
||||
CredentialCreate: true,
|
||||
CredentialDelete: true,
|
||||
CredentialManageDomains: true,
|
||||
CredentialUpdate: true,
|
||||
CredentialView: true,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: true,
|
||||
ItemCreate: true,
|
||||
ItemDelete: true,
|
||||
ItemDiscover: true,
|
||||
ItemMove: true,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: true,
|
||||
},
|
||||
ProjectDeveloper: {
|
||||
CredentialCreate: false,
|
||||
CredentialDelete: false,
|
||||
CredentialManageDomains: false,
|
||||
CredentialUpdate: false,
|
||||
CredentialView: false,
|
||||
ItemBuild: true,
|
||||
ItemCancel: true,
|
||||
ItemConfigure: false,
|
||||
ItemCreate: false,
|
||||
ItemDelete: false,
|
||||
ItemDiscover: true,
|
||||
ItemMove: false,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: true,
|
||||
RunDelete: true,
|
||||
RunReplay: true,
|
||||
RunUpdate: true,
|
||||
SCMTag: false,
|
||||
},
|
||||
ProjectReporter: {
|
||||
CredentialCreate: false,
|
||||
CredentialDelete: false,
|
||||
CredentialManageDomains: false,
|
||||
CredentialUpdate: false,
|
||||
CredentialView: false,
|
||||
ItemBuild: false,
|
||||
ItemCancel: false,
|
||||
ItemConfigure: false,
|
||||
ItemCreate: false,
|
||||
ItemDelete: false,
|
||||
ItemDiscover: true,
|
||||
ItemMove: false,
|
||||
ItemRead: true,
|
||||
ItemWorkspace: false,
|
||||
RunDelete: false,
|
||||
RunReplay: false,
|
||||
RunUpdate: false,
|
||||
SCMTag: false,
|
||||
},
|
||||
}
|
||||
|
||||
// get roleName of the project
|
||||
func GetProjectRoleName(projectId, role string) string {
|
||||
return fmt.Sprintf("%s-%s-project", projectId, role)
|
||||
}
|
||||
|
||||
// get roleName of the pipeline
|
||||
func GetPipelineRoleName(projectId, role string) string {
|
||||
return fmt.Sprintf("%s-%s-pipeline", projectId, role)
|
||||
}
|
||||
|
||||
// get pattern string of the project
|
||||
func GetProjectRolePattern(projectId string) string {
|
||||
return fmt.Sprintf("^%s$", projectId)
|
||||
}
|
||||
|
||||
// get pattern string of the project
|
||||
func GetPipelineRolePattern(projectId string) string {
|
||||
return fmt.Sprintf("^%s/.*", projectId)
|
||||
}
|
||||
|
||||
// get unified sync current time
|
||||
func GetSyncNowTime() string {
|
||||
return time.Now().String()
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,130 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops/fake"
|
||||
)
|
||||
|
||||
const baseUrl = "http://127.0.0.1/kapis/devops.kubesphere.io/v1alpha2/"
|
||||
|
||||
func TestGetNodesDetail(t *testing.T) {
|
||||
fakeData := make(map[string]interface{})
|
||||
PipelineRunNodes := []devops.PipelineRunNodes{
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "1",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "2",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "3",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
}
|
||||
|
||||
NodeSteps := []devops.NodeSteps{
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "1",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
}
|
||||
|
||||
fakeData["project1-pipeline1-run1"] = PipelineRunNodes
|
||||
fakeData["project1-pipeline1-run1-1"] = NodeSteps
|
||||
fakeData["project1-pipeline1-run1-2"] = NodeSteps
|
||||
fakeData["project1-pipeline1-run1-3"] = NodeSteps
|
||||
|
||||
devopsClient := fake.NewFakeDevops(fakeData)
|
||||
|
||||
devopsOperator := NewDevopsOperator(devopsClient, nil, nil, nil, nil)
|
||||
|
||||
httpReq, _ := http.NewRequest(http.MethodGet, baseUrl+"devops/project1/pipelines/pipeline1/runs/run1/nodesdetail/?limit=10000", nil)
|
||||
|
||||
nodesDetails, err := devopsOperator.GetNodesDetail("project1", "pipeline1", "run1", httpReq)
|
||||
if err != nil || nodesDetails == nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
|
||||
for _, v := range nodesDetails {
|
||||
if v.Steps[0].ID == "" {
|
||||
t.Fatalf("Can not get any step.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBranchNodesDetail(t *testing.T) {
|
||||
fakeData := make(map[string]interface{})
|
||||
|
||||
BranchPipelineRunNodes := []devops.BranchPipelineRunNodes{
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "1",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "2",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "3",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
}
|
||||
|
||||
BranchNodeSteps := []devops.NodeSteps{
|
||||
{
|
||||
DisplayName: "Deploy to Kubernetes",
|
||||
ID: "1",
|
||||
Result: "SUCCESS",
|
||||
},
|
||||
}
|
||||
|
||||
fakeData["project1-pipeline1-branch1-run1"] = BranchPipelineRunNodes
|
||||
fakeData["project1-pipeline1-branch1-run1-1"] = BranchNodeSteps
|
||||
fakeData["project1-pipeline1-branch1-run1-2"] = BranchNodeSteps
|
||||
fakeData["project1-pipeline1-branch1-run1-3"] = BranchNodeSteps
|
||||
|
||||
devopsClient := fake.NewFakeDevops(fakeData)
|
||||
|
||||
devopsOperator := NewDevopsOperator(devopsClient, nil, nil, nil, nil)
|
||||
|
||||
httpReq, _ := http.NewRequest(http.MethodGet, baseUrl+"devops/project1/pipelines/pipeline1/branchs/branch1/runs/run1/nodesdetail/?limit=10000", nil)
|
||||
|
||||
nodesDetails, err := devopsOperator.GetBranchNodesDetail("project1", "pipeline1", "branch1", "run1", httpReq)
|
||||
if err != nil || nodesDetails == nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
|
||||
for _, v := range nodesDetails {
|
||||
if v.Steps[0].ID == "" {
|
||||
t.Fatalf("Can not get any step.")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
type JkError struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"code"`
|
||||
}
|
||||
|
||||
func (err *JkError) Error() string {
|
||||
return err.Message
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/devops/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
)
|
||||
|
||||
var DevOpsProjectColumns = GetColumnsFromStruct(&v1alpha2.DevOpsProject{})
|
||||
|
||||
const (
|
||||
DevOpsProjectTableName = "project"
|
||||
DevOpsProjectPrefix = "project-"
|
||||
DevOpsProjectDescriptionColumn = "description"
|
||||
DevOpsProjectIdColumn = "project.project_id"
|
||||
DevOpsProjectNameColumn = "project.name"
|
||||
DevOpsProjectExtraColumn = "project.extra"
|
||||
DevOpsProjectWorkSpaceColumn = "project.workspace"
|
||||
DevOpsProjectCreateTimeColumn = "project.create_time"
|
||||
)
|
||||
|
||||
func NewDevOpsProject(name, description, creator, extra, workspace string) *v1alpha2.DevOpsProject {
|
||||
return &v1alpha2.DevOpsProject{
|
||||
ProjectId: idutils.GetUuid(DevOpsProjectPrefix),
|
||||
Name: name,
|
||||
Description: description,
|
||||
Creator: creator,
|
||||
CreateTime: time.Now(),
|
||||
Status: StatusActive,
|
||||
Visibility: VisibilityPrivate,
|
||||
Extra: extra,
|
||||
Workspace: workspace,
|
||||
}
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
type ProjectCredentialGetter interface {
|
||||
GetProjectCredentialUsage(projectId, credentialId string) (*devops.Credential, error)
|
||||
}
|
||||
|
||||
type projectCredentialGetter struct {
|
||||
devopsClient devops.Interface
|
||||
}
|
||||
|
||||
// GetProjectCredentialUsage get the usage of Credential
|
||||
func (o *projectCredentialGetter) GetProjectCredentialUsage(projectId, credentialId string) (*devops.Credential, error) {
|
||||
credential, err := o.devopsClient.GetCredentialInProject(projectId,
|
||||
credentialId)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, err
|
||||
}
|
||||
return credential, nil
|
||||
}
|
||||
|
||||
func NewProjectCredentialOperator(devopsClient devops.Interface) ProjectCredentialGetter {
|
||||
return &projectCredentialGetter{devopsClient: devopsClient}
|
||||
}
|
||||
@@ -1,140 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/sonarqube"
|
||||
)
|
||||
|
||||
type PipelineSonarGetter interface {
|
||||
GetPipelineSonar(projectId, pipelineId string) ([]*sonarqube.SonarStatus, error)
|
||||
GetMultiBranchPipelineSonar(projectId, pipelineId, branchId string) ([]*sonarqube.SonarStatus, error)
|
||||
}
|
||||
type pipelineSonarGetter struct {
|
||||
devops.BuildGetter
|
||||
sonarqube.SonarInterface
|
||||
}
|
||||
|
||||
func NewPipelineSonarGetter(devopClient devops.BuildGetter, sonarClient sonarqube.SonarInterface) PipelineSonarGetter {
|
||||
return &pipelineSonarGetter{
|
||||
BuildGetter: devopClient,
|
||||
SonarInterface: sonarClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *pipelineSonarGetter) GetPipelineSonar(projectId, pipelineId string) ([]*sonarqube.SonarStatus, error) {
|
||||
|
||||
build, err := g.GetProjectPipelineBuildByType(projectId, pipelineId, devops.LastBuild)
|
||||
if err != nil && errors.GetServiceErrorCode(err) != http.StatusNotFound {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, err
|
||||
} else if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, nil
|
||||
}
|
||||
var taskIds []string
|
||||
for _, action := range build.Actions {
|
||||
if action.ClassName == sonarqube.SonarAnalysisActionClass {
|
||||
taskIds = append(taskIds, action.SonarTaskId)
|
||||
}
|
||||
}
|
||||
var sonarStatus []*sonarqube.SonarStatus
|
||||
|
||||
if len(taskIds) != 0 {
|
||||
sonarStatus, err = g.GetSonarResultsByTaskIds(taskIds...)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
} else if len(taskIds) == 0 {
|
||||
build, err := g.GetProjectPipelineBuildByType(projectId, pipelineId, devops.LastCompletedBuild)
|
||||
if err != nil && errors.GetServiceErrorCode(err) != http.StatusNotFound {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(errors.GetServiceErrorCode(err), err.Error())
|
||||
} else if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, nil
|
||||
}
|
||||
for _, action := range build.Actions {
|
||||
if action.ClassName == sonarqube.SonarAnalysisActionClass {
|
||||
taskIds = append(taskIds, action.SonarTaskId)
|
||||
}
|
||||
}
|
||||
sonarStatus, err = g.GetSonarResultsByTaskIds(taskIds...)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
|
||||
}
|
||||
return sonarStatus, nil
|
||||
}
|
||||
|
||||
func (g *pipelineSonarGetter) GetMultiBranchPipelineSonar(projectId, pipelineId, branchId string) ([]*sonarqube.SonarStatus, error) {
|
||||
|
||||
build, err := g.GetMultiBranchPipelineBuildByType(projectId, pipelineId, branchId, devops.LastBuild)
|
||||
if err != nil && errors.GetServiceErrorCode(err) != http.StatusNotFound {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(errors.GetServiceErrorCode(err), err.Error())
|
||||
} else if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, nil
|
||||
}
|
||||
var taskIds []string
|
||||
for _, action := range build.Actions {
|
||||
if action.ClassName == sonarqube.SonarAnalysisActionClass {
|
||||
taskIds = append(taskIds, action.SonarTaskId)
|
||||
}
|
||||
}
|
||||
var sonarStatus []*sonarqube.SonarStatus
|
||||
|
||||
if len(taskIds) != 0 {
|
||||
sonarStatus, err = g.GetSonarResultsByTaskIds(taskIds...)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
} else if len(taskIds) == 0 {
|
||||
build, err := g.GetMultiBranchPipelineBuildByType(projectId, pipelineId, branchId, devops.LastCompletedBuild)
|
||||
if err != nil && errors.GetServiceErrorCode(err) != http.StatusNotFound {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(errors.GetServiceErrorCode(err), err.Error())
|
||||
} else if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, nil
|
||||
}
|
||||
for _, action := range build.Actions {
|
||||
if action.ClassName == sonarqube.SonarAnalysisActionClass {
|
||||
taskIds = append(taskIds, action.SonarTaskId)
|
||||
}
|
||||
}
|
||||
sonarStatus, err = g.GetSonarResultsByTaskIds(taskIds...)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
|
||||
}
|
||||
return sonarStatus, nil
|
||||
}
|
||||
@@ -1,206 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"code.cloudfoundry.org/bytefmt"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
awsS3 "github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/devops/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3"
|
||||
)
|
||||
|
||||
const (
|
||||
GetS2iBinaryURL = "http://ks-apiserver.kubesphere-system.svc/kapis/devops.kubesphere.io/v1alpha2/namespaces/%s/s2ibinaries/%s/file/%s"
|
||||
)
|
||||
|
||||
type S2iBinaryUploader interface {
|
||||
UploadS2iBinary(namespace, name, md5 string, header *multipart.FileHeader) (*v1alpha1.S2iBinary, error)
|
||||
|
||||
DownloadS2iBinary(namespace, name, fileName string) (string, error)
|
||||
}
|
||||
|
||||
type s2iBinaryUploader struct {
|
||||
client versioned.Interface
|
||||
informers externalversions.SharedInformerFactory
|
||||
s3Client s3.Interface
|
||||
}
|
||||
|
||||
func NewS2iBinaryUploader(client versioned.Interface, informers externalversions.SharedInformerFactory, s3Client s3.Interface) S2iBinaryUploader {
|
||||
return &s2iBinaryUploader{
|
||||
client: client,
|
||||
informers: informers,
|
||||
s3Client: s3Client,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *s2iBinaryUploader) UploadS2iBinary(namespace, name, md5 string, fileHeader *multipart.FileHeader) (*v1alpha1.S2iBinary, error) {
|
||||
binFile, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, err
|
||||
}
|
||||
defer binFile.Close()
|
||||
|
||||
origin, err := s.informers.Devops().V1alpha1().S2iBinaries().Lister().S2iBinaries(namespace).Get(name)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, err
|
||||
}
|
||||
//Check file is uploading
|
||||
if origin.Status.Phase == v1alpha1.StatusUploading {
|
||||
err := restful.NewError(http.StatusConflict, "file is uploading, please try later")
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
copy := origin.DeepCopy()
|
||||
copy.Spec.MD5 = md5
|
||||
copy.Spec.Size = bytefmt.ByteSize(uint64(fileHeader.Size))
|
||||
copy.Spec.FileName = fileHeader.Filename
|
||||
copy.Spec.DownloadURL = fmt.Sprintf(GetS2iBinaryURL, namespace, name, copy.Spec.FileName)
|
||||
if origin.Status.Phase == v1alpha1.StatusReady && reflect.DeepEqual(origin, copy) {
|
||||
return origin, nil
|
||||
}
|
||||
|
||||
//Set status Uploading to lock resource
|
||||
uploading, err := s.SetS2iBinaryStatus(copy, v1alpha1.StatusUploading)
|
||||
if err != nil {
|
||||
err := restful.NewError(http.StatusConflict, fmt.Sprintf("could not set status: %+v", err))
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copy = uploading.DeepCopy()
|
||||
copy.Spec.MD5 = md5
|
||||
copy.Spec.Size = bytefmt.ByteSize(uint64(fileHeader.Size))
|
||||
copy.Spec.FileName = fileHeader.Filename
|
||||
copy.Spec.DownloadURL = fmt.Sprintf(GetS2iBinaryURL, namespace, name, copy.Spec.FileName)
|
||||
|
||||
err = s.s3Client.Upload(fmt.Sprintf("%s-%s", namespace, name), copy.Spec.FileName, binFile, int(fileHeader.Size))
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
switch aerr.Code() {
|
||||
case awsS3.ErrCodeNoSuchBucket:
|
||||
klog.Error(err)
|
||||
_, serr := s.SetS2iBinaryStatusWithRetry(copy, origin.Status.Phase)
|
||||
if serr != nil {
|
||||
klog.Error(serr)
|
||||
}
|
||||
return nil, err
|
||||
default:
|
||||
klog.Error(err)
|
||||
_, serr := s.SetS2iBinaryStatusWithRetry(copy, v1alpha1.StatusUploadFailed)
|
||||
if serr != nil {
|
||||
klog.Error(serr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if copy.Spec.UploadTimeStamp == nil {
|
||||
copy.Spec.UploadTimeStamp = new(metav1.Time)
|
||||
}
|
||||
*copy.Spec.UploadTimeStamp = metav1.Now()
|
||||
copy, err = s.client.DevopsV1alpha1().S2iBinaries(namespace).Update(context.Background(), copy, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copy, err = s.SetS2iBinaryStatusWithRetry(copy, v1alpha1.StatusReady)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return copy, nil
|
||||
}
|
||||
|
||||
func (s *s2iBinaryUploader) DownloadS2iBinary(namespace, name, fileName string) (string, error) {
|
||||
|
||||
origin, err := s.informers.Devops().V1alpha1().S2iBinaries().Lister().S2iBinaries(namespace).Get(name)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if origin.Spec.FileName != fileName {
|
||||
err := fmt.Errorf("could not fould file %s", fileName)
|
||||
klog.Error(err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if origin.Status.Phase != v1alpha1.StatusReady {
|
||||
err := restful.NewError(http.StatusBadRequest, "file is not ready, please try later")
|
||||
klog.Error(err)
|
||||
return "", err
|
||||
}
|
||||
return s.s3Client.GetDownloadURL(fmt.Sprintf("%s-%s", namespace, name), fileName)
|
||||
}
|
||||
|
||||
func (s *s2iBinaryUploader) SetS2iBinaryStatus(s2ibin *v1alpha1.S2iBinary, status string) (*v1alpha1.S2iBinary, error) {
|
||||
copy := s2ibin.DeepCopy()
|
||||
copy.Status.Phase = status
|
||||
copy, err := s.client.DevopsV1alpha1().S2iBinaries(s2ibin.Namespace).Update(context.Background(), copy, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return copy, nil
|
||||
}
|
||||
|
||||
// SetS2iBinaryStatusWithRetry sets the status phase of the named S2iBinary,
// retrying on update conflicts with the default client-go backoff. Each
// attempt re-reads the latest object so concurrent writers do not wedge the
// update.
func (s *s2iBinaryUploader) SetS2iBinaryStatusWithRetry(s2ibin *v1alpha1.S2iBinary, status string) (*v1alpha1.S2iBinary, error) {

	var bin *v1alpha1.S2iBinary
	var err error
	// NOTE: bin and err are assigned with "=" (not ":=") inside the closure so
	// the final object remains visible after RetryOnConflict returns.
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		bin, err = s.client.DevopsV1alpha1().S2iBinaries(s2ibin.Namespace).Get(context.Background(), s2ibin.Name, metav1.GetOptions{})
		if err != nil {
			klog.Error(err)
			return err
		}
		bin.Status.Phase = status
		bin, err = s.client.DevopsV1alpha1().S2iBinaries(s2ibin.Namespace).Update(context.Background(), bin, metav1.UpdateOptions{})
		if err != nil {
			klog.Error(err)
			return err
		}
		return nil
	})
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	return bin, nil
}
|
||||
@@ -1,133 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"mime/multipart"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"code.cloudfoundry.org/bytefmt"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientgotesting "k8s.io/client-go/testing"
|
||||
|
||||
"kubesphere.io/api/devops/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
fakeS3 "kubesphere.io/kubesphere/pkg/simple/client/s3/fake"
|
||||
"kubesphere.io/kubesphere/pkg/utils/hashutil"
|
||||
)
|
||||
|
||||
const (
	// Fixture values shared by the upload/download tests in this file.
	fileaContents = "This is a test file."
	fileaName     = "filea.txt"
	boundary      = `MyBoundary`
	ns            = "testns"
	s2ibname      = "test"
)
|
||||
|
||||
// message is a raw multipart/form-data payload carrying fileaContents under
// the "binary" form field, delimited by boundary.
const message = `
--MyBoundary
Content-Disposition: form-data; name="binary"; filename="filea.txt"
Content-Type: text/plain

` + fileaContents + `
--MyBoundary--
`
|
||||
|
||||
func TestS2iBinaryUploader(t *testing.T) {
|
||||
s2ib := s2ibinary(ns, s2ibname)
|
||||
fakeKubeClient := fake.NewSimpleClientset(s2ib)
|
||||
fakeWatch := watch.NewFake()
|
||||
fakeKubeClient.AddWatchReactor("*", clientgotesting.DefaultWatchReactor(fakeWatch, nil))
|
||||
informerFactory := ksinformers.NewSharedInformerFactory(fakeKubeClient, 0)
|
||||
stopCh := make(chan struct{})
|
||||
s2iInformer := informerFactory.Devops().V1alpha1().S2iBinaries()
|
||||
//nolint:ineffassign,staticcheck
|
||||
err := s2iInformer.Informer().GetIndexer().Add(s2ib)
|
||||
defer close(stopCh)
|
||||
informerFactory.Start(stopCh)
|
||||
informerFactory.WaitForCacheSync(stopCh)
|
||||
|
||||
s3 := fakeS3.NewFakeS3()
|
||||
uploader := NewS2iBinaryUploader(fakeKubeClient, informerFactory, s3)
|
||||
header := prepareFileHeader()
|
||||
file, err := header.Open()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
md5, err := hashutil.GetMD5(file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantSpec := v1alpha1.S2iBinarySpec{
|
||||
FileName: fileaName,
|
||||
MD5: md5,
|
||||
Size: bytefmt.ByteSize(uint64(header.Size)),
|
||||
}
|
||||
|
||||
binary, err := uploader.UploadS2iBinary(ns, s2ibname, md5, header)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wantSpec.UploadTimeStamp = binary.Spec.UploadTimeStamp
|
||||
wantSpec.DownloadURL = binary.Spec.DownloadURL
|
||||
if !reflect.DeepEqual(binary.Spec, wantSpec) {
|
||||
t.Fatalf("s2ibinary spec is not same with expected, get: %+v, expected: %+v", binary, wantSpec)
|
||||
}
|
||||
|
||||
_, ok := s3.Storage[fmt.Sprintf("%s-%s", ns, s2ibname)]
|
||||
if !ok {
|
||||
t.Fatalf("should get file in s3")
|
||||
}
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
url, err := uploader.DownloadS2iBinary(ns, s2ibname, fileaName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if url != fmt.Sprintf("http://%s-%s/%s", ns, s2ibname, fileaName) {
|
||||
t.Fatalf("download url is not equal with expected, get: %+v, expected: %+v", url, fmt.Sprintf("http://%s-%s/%s", ns, s2ibname, fileaName))
|
||||
}
|
||||
}
|
||||
|
||||
func prepareFileHeader() *multipart.FileHeader {
|
||||
reader := strings.NewReader(message)
|
||||
multipartReader := multipart.NewReader(reader, boundary)
|
||||
form, err := multipartReader.ReadForm(25)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return form.File["binary"][0]
|
||||
}
|
||||
|
||||
func s2ibinary(namespace, name string) *v1alpha1.S2iBinary {
|
||||
return &v1alpha1.S2iBinary{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.S2iBinarySpec{},
|
||||
Status: v1alpha1.S2iBinaryStatus{},
|
||||
}
|
||||
}
|
||||
@@ -1,80 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package events
|
||||
|
||||
import (
|
||||
eventsv1alpha1 "kubesphere.io/kubesphere/pkg/api/events/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/events"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
// Interface queries Kubernetes events through the configured events backend.
type Interface interface {
	// Events runs the query described by queryParam; MutateFilterFunc, when
	// non-nil, may adjust the derived filter (e.g. to enforce authorization)
	// before the backend is queried.
	Events(queryParam *eventsv1alpha1.Query, MutateFilterFunc func(*events.Filter)) (*eventsv1alpha1.APIResponse, error)
}
|
||||
|
||||
// eventsOperator implements Interface on top of an events backend client.
type eventsOperator struct {
	client events.Client
}
|
||||
|
||||
func NewEventsOperator(client events.Client) Interface {
|
||||
return &eventsOperator{client}
|
||||
}
|
||||
|
||||
// Events translates the API query into a backend filter and dispatches on the
// requested operation: "histogram" (event counts over time), "statistics"
// (per-resource totals) or the default event search.
func (eo *eventsOperator) Events(queryParam *eventsv1alpha1.Query,
	MutateFilterFunc func(*events.Filter)) (*eventsv1alpha1.APIResponse, error) {
	filter := &events.Filter{
		InvolvedObjectNames:     stringutils.Split(queryParam.InvolvedObjectNameFilter, ","),
		InvolvedObjectNameFuzzy: stringutils.Split(queryParam.InvolvedObjectNameSearch, ","),
		InvolvedObjectkinds:     stringutils.Split(queryParam.InvolvedObjectKindFilter, ","),
		Reasons:                 stringutils.Split(queryParam.ReasonFilter, ","),
		ReasonFuzzy:             stringutils.Split(queryParam.ReasonSearch, ","),
		MessageFuzzy:            stringutils.Split(queryParam.MessageSearch, ","),
		Type:                    queryParam.TypeFilter,
		StartTime:               queryParam.StartTime,
		EndTime:                 queryParam.EndTime,
	}
	// Give the caller a chance to restrict the filter (e.g. to namespaces the
	// requesting user is allowed to see).
	if MutateFilterFunc != nil {
		MutateFilterFunc(filter)
	}

	var ar eventsv1alpha1.APIResponse
	var err error
	switch queryParam.Operation {
	case "histogram":
		// An empty namespace map means nothing is visible: return an empty
		// result instead of querying the backend.
		if len(filter.InvolvedObjectNamespaceMap) == 0 {
			ar.Histogram = &events.Histogram{}
		} else {
			ar.Histogram, err = eo.client.CountOverTime(filter, queryParam.Interval)
		}
	case "statistics":
		if len(filter.InvolvedObjectNamespaceMap) == 0 {
			ar.Statistics = &events.Statistics{}
		} else {
			ar.Statistics, err = eo.client.StatisticsOnResources(filter)
		}
	default:
		if len(filter.InvolvedObjectNamespaceMap) == 0 {
			ar.Events = &events.Events{}
		} else {
			ar.Events, err = eo.client.SearchEvents(filter, queryParam.From, queryParam.Size, queryParam.Sort)
		}
	}
	if err != nil {
		return nil, err
	}
	return &ar, nil
}
|
||||
@@ -1,530 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"kubesphere.io/api/gateway/v1alpha1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/pod"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
|
||||
)
|
||||
|
||||
const (
	// MasterLabel identifies control-plane nodes.
	MasterLabel = "node-role.kubernetes.io/control-plane"
	// SidecarInject is the istio sidecar-injection annotation carried over
	// from legacy gateway deployments.
	SidecarInject = "sidecar.istio.io/inject"
	// Gateway objects are named gatewayPrefix + project namespace.
	gatewayPrefix = "kubesphere-router-"
	// workingNamespace hosts gateway workloads when no namespace is configured.
	workingNamespace        = "kubesphere-controls-system"
	globalGatewayNameSuffix = "kubesphere-system"
	globalGatewayName       = gatewayPrefix + globalGatewayNameSuffix
	// helmPatch adopts a legacy Service into the helm release during upgrade;
	// the release name and namespace are substituted in order.
	helmPatch = `{"metadata":{"annotations":{"meta.helm.sh/release-name":"%s-ingress","meta.helm.sh/release-namespace":"%s"},"labels":{"helm.sh/chart":"ingress-nginx-3.35.0","app.kubernetes.io/managed-by":"Helm","app":null,"component":null,"tier":null}},"spec":{"selector":null}}`
)
|
||||
|
||||
// GatewayOperator manages project and global gateways (ingress controllers):
// CRUD operations, upgrade of legacy router gateways, and access to the
// gateway's backing pods and their logs.
type GatewayOperator interface {
	GetGateways(namespace string) ([]*v1alpha1.Gateway, error)
	CreateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error)
	DeleteGateway(namespace string) error
	UpdateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error)
	UpgradeGateway(namespace string) (*v1alpha1.Gateway, error)
	ListGateways(query *query.Query) (*api.ListResult, error)
	GetPods(namespace string, query *query.Query) (*api.ListResult, error)
	GetPodLogs(ctx context.Context, namespace string, podName string, logOptions *corev1.PodLogOptions, responseWriter io.Writer) error
}
|
||||
|
||||
// gatewayOperator implements GatewayOperator. Cached reads go through cache,
// writes through the controller-runtime client, and pod/log access through
// the plain Kubernetes clientset.
type gatewayOperator struct {
	k8sclient kubernetes.Interface
	factory   informers.InformerFactory
	client    client.Client
	cache     cache.Cache
	options   *gateway.Options
}
|
||||
|
||||
func NewGatewayOperator(client client.Client, cache cache.Cache, options *gateway.Options, factory informers.InformerFactory, k8sclient kubernetes.Interface) GatewayOperator {
|
||||
return &gatewayOperator{
|
||||
client: client,
|
||||
cache: cache,
|
||||
options: options,
|
||||
k8sclient: k8sclient,
|
||||
factory: factory,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) getWorkingNamespace(namespace string) string {
|
||||
ns := c.options.Namespace
|
||||
// Set the working namespace to watching namespace when the Gateway's Namespace Option is empty
|
||||
if ns == "" {
|
||||
ns = namespace
|
||||
}
|
||||
// Convert the global gateway query parameter
|
||||
if namespace == globalGatewayNameSuffix {
|
||||
ns = workingNamespace
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
// override user's setting when create/update a project gateway.
|
||||
func (c *gatewayOperator) overrideDefaultValue(gateway *v1alpha1.Gateway, namespace string) *v1alpha1.Gateway {
|
||||
// override default name
|
||||
gateway.Name = fmt.Sprint(gatewayPrefix, namespace)
|
||||
if gateway.Name != globalGatewayName {
|
||||
gateway.Spec.Controller.Scope = v1alpha1.Scope{Enabled: true, Namespace: namespace}
|
||||
}
|
||||
gateway.Namespace = c.getWorkingNamespace(namespace)
|
||||
return gateway
|
||||
}
|
||||
|
||||
// getGlobalGateway returns the global gateway
|
||||
func (c *gatewayOperator) getGlobalGateway() *v1alpha1.Gateway {
|
||||
globalkey := types.NamespacedName{
|
||||
Namespace: workingNamespace,
|
||||
Name: globalGatewayName,
|
||||
}
|
||||
|
||||
global := &v1alpha1.Gateway{}
|
||||
if err := c.client.Get(context.TODO(), globalkey, global); err != nil {
|
||||
return nil
|
||||
}
|
||||
return global
|
||||
}
|
||||
|
||||
// getLegacyGateway returns gateway created by the router api.
|
||||
// Should always prompt user to upgrade the gateway.
|
||||
func (c *gatewayOperator) getLegacyGateway(namespace string) *v1alpha1.Gateway {
|
||||
s := &corev1.ServiceList{}
|
||||
|
||||
// filter legacy service by labels
|
||||
_ = c.client.List(context.TODO(), s, &client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(
|
||||
labels.Set{
|
||||
"app": "kubesphere",
|
||||
"component": "ks-router",
|
||||
"tier": "backend",
|
||||
"project": namespace,
|
||||
}),
|
||||
})
|
||||
|
||||
// create a fake Gateway object when legacy service exists
|
||||
if len(s.Items) > 0 {
|
||||
d := &appsv1.Deployment{}
|
||||
c.client.Get(context.TODO(), client.ObjectKeyFromObject(&s.Items[0]), d)
|
||||
|
||||
return c.convert(namespace, &s.Items[0], d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert builds a synthetic Gateway representation of a legacy router from
// its Service (type, annotations) and Deployment (replicas, resources,
// sidecar-injection annotation).
func (c *gatewayOperator) convert(namespace string, svc *corev1.Service, deploy *appsv1.Deployment) *v1alpha1.Gateway {
	legacy := v1alpha1.Gateway{
		TypeMeta: v1.TypeMeta{
			Kind:       "",
			APIVersion: "",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:      svc.Name,
			Namespace: svc.Namespace,
		},
		Spec: v1alpha1.GatewaySpec{
			Controller: v1alpha1.ControllerSpec{
				Scope: v1alpha1.Scope{
					Enabled:   true,
					Namespace: namespace,
				},
			},
			Service: v1alpha1.ServiceSpec{
				Annotations: svc.Annotations,
				Type:        svc.Spec.Type,
			},
			Deployment: v1alpha1.DeploymentSpec{
				Replicas: deploy.Spec.Replicas,
			},
		},
	}
	// Preserve the istio sidecar-injection setting from the old deployment.
	if an, ok := deploy.Annotations[SidecarInject]; ok {
		legacy.Spec.Deployment.Annotations = make(map[string]string)
		legacy.Spec.Deployment.Annotations[SidecarInject] = an
	}
	// Carry over the first container's resource requirements, if any.
	if len(deploy.Spec.Template.Spec.Containers) > 0 {
		legacy.Spec.Deployment.Resources = deploy.Spec.Template.Spec.Containers[0].Resources
	}
	return &legacy
}
|
||||
|
||||
func (c *gatewayOperator) getMasterNodeIp() []string {
|
||||
internalIps := []string{}
|
||||
masters := &corev1.NodeList{}
|
||||
err := c.cache.List(context.TODO(), masters, &client.ListOptions{LabelSelector: labels.SelectorFromSet(
|
||||
labels.Set{
|
||||
MasterLabel: "",
|
||||
})})
|
||||
|
||||
if err != nil {
|
||||
klog.Info(err)
|
||||
return internalIps
|
||||
}
|
||||
|
||||
for _, node := range masters.Items {
|
||||
for _, address := range node.Status.Addresses {
|
||||
if address.Type == corev1.NodeInternalIP {
|
||||
internalIps = append(internalIps, address.Address)
|
||||
}
|
||||
}
|
||||
}
|
||||
return internalIps
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) updateStatus(gateway *v1alpha1.Gateway, svc *corev1.Service) (*v1alpha1.Gateway, error) {
|
||||
// append selected node ip as loadBalancer ingress ip
|
||||
if svc.Spec.Type != corev1.ServiceTypeLoadBalancer && len(svc.Status.LoadBalancer.Ingress) == 0 {
|
||||
rips := c.getMasterNodeIp()
|
||||
for _, rip := range rips {
|
||||
gIngress := corev1.LoadBalancerIngress{
|
||||
IP: rip,
|
||||
}
|
||||
svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, gIngress)
|
||||
}
|
||||
}
|
||||
|
||||
status := unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"loadBalancer": svc.Status.LoadBalancer,
|
||||
"service": svc.Spec.Ports,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := status.MarshalJSON()
|
||||
if err != nil {
|
||||
return gateway, err
|
||||
}
|
||||
if gateway.Status.Raw != nil {
|
||||
//merge with origin status
|
||||
patch, err := jsonpatch.CreateMergePatch([]byte(`{}`), target)
|
||||
if err != nil {
|
||||
return gateway, err
|
||||
}
|
||||
modified, err := jsonpatch.MergePatch(gateway.Status.Raw, patch)
|
||||
if err != nil {
|
||||
return gateway, err
|
||||
}
|
||||
gateway.Status.Raw = modified
|
||||
return gateway, err
|
||||
}
|
||||
gateway.Status.Raw = target
|
||||
return gateway, nil
|
||||
}
|
||||
|
||||
// GetGateways returns all Gateways from the project. There are at most 2 gateways exists in a project,
// a Global Gateway and a Project Gateway or a Legacy Project Gateway. Each
// returned gateway gets its status refreshed from its Service best-effort.
func (c *gatewayOperator) GetGateways(namespace string) ([]*v1alpha1.Gateway, error) {

	var gateways []*v1alpha1.Gateway

	if g := c.getGlobalGateway(); g != nil {
		gateways = append(gateways, g)
	}
	if g := c.getLegacyGateway(namespace); g != nil {
		gateways = append(gateways, g)
	}

	// Query non-cluster gateway
	if namespace != globalGatewayNameSuffix {
		key := types.NamespacedName{
			Namespace: c.getWorkingNamespace(namespace),
			Name:      fmt.Sprint(gatewayPrefix, namespace),
		}
		obj := &v1alpha1.Gateway{}
		err := c.client.Get(context.TODO(), key, obj)

		// NotFound simply means the project has no gateway yet.
		if err == nil {
			gateways = append(gateways, obj)
		} else if !errors.IsNotFound(err) {
			return nil, err
		}
	}

	for _, g := range gateways {
		s := &corev1.Service{}
		// The Service name is assumed to always equal the gateway name.
		// TODO: We need a mapping relation between the service and the gateway. Label Selector should be a good option.
		err := c.client.Get(context.TODO(), client.ObjectKeyFromObject(g), s)
		if err != nil {
			// Best effort: a gateway without a service is still returned.
			klog.Info(err)
			continue
		}
		_, err = c.updateStatus(g, s)
		if err != nil {
			klog.Info(err)
		}
	}

	return gateways, nil
}
|
||||
|
||||
// Create a Gateway in a namespace
|
||||
func (c *gatewayOperator) CreateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error) {
|
||||
|
||||
if g := c.getGlobalGateway(); g != nil {
|
||||
return nil, fmt.Errorf("can't create project gateway if global gateway enabled")
|
||||
}
|
||||
|
||||
if g := c.getLegacyGateway(namespace); g != nil {
|
||||
return nil, fmt.Errorf("can't create project gateway if legacy gateway exists, please upgrade the gateway firstly")
|
||||
}
|
||||
|
||||
c.overrideDefaultValue(obj, namespace)
|
||||
err := c.client.Create(context.TODO(), obj)
|
||||
return obj, err
|
||||
}
|
||||
|
||||
// DeleteGateway is used to delete Gateway related resources in the namespace
|
||||
func (c *gatewayOperator) DeleteGateway(namespace string) error {
|
||||
obj := &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: c.getWorkingNamespace(namespace),
|
||||
Name: fmt.Sprint(gatewayPrefix, namespace),
|
||||
},
|
||||
}
|
||||
return c.client.Delete(context.TODO(), obj)
|
||||
}
|
||||
|
||||
// Update Gateway
|
||||
func (c *gatewayOperator) UpdateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error) {
|
||||
if c.options.Namespace == "" && obj.Namespace != namespace || c.options.Namespace != "" && c.options.Namespace != obj.Namespace {
|
||||
return nil, fmt.Errorf("namespace doesn't match with origin namespace")
|
||||
}
|
||||
c.overrideDefaultValue(obj, namespace)
|
||||
err := c.client.Update(context.TODO(), obj)
|
||||
return obj, err
|
||||
}
|
||||
|
||||
// UpgradeGateway upgrades the legacy Project Gateway to a Gateway CRD.
// No rolling upgrade guaranteed, Service would be interrupted when deleting old deployment.
func (c *gatewayOperator) UpgradeGateway(namespace string) (*v1alpha1.Gateway, error) {
	l := c.getLegacyGateway(namespace)
	if l == nil {
		return nil, fmt.Errorf("invalid operation, no legacy gateway was found")
	}
	if l.Namespace != c.getWorkingNamespace(namespace) {
		return nil, fmt.Errorf("invalid operation, can't upgrade legacy gateway when working namespace changed")
	}

	// Get legacy gateway's config from configmap; carry it into the new CRD
	// and delete the configmap once the upgrade function returns.
	cm := &corev1.ConfigMap{}
	err := c.client.Get(context.TODO(), client.ObjectKey{Namespace: l.Namespace, Name: fmt.Sprintf("%s-nginx", l.Name)}, cm)
	if err == nil {
		l.Spec.Controller.Config = cm.Data
		defer func() {
			c.client.Delete(context.TODO(), cm)
		}()
	}

	// Delete old deployment, because it's not compatible with the deployment in the helm chart.
	// We can't defer here, there's a potential race condition causing gateway operator fails.
	d := &appsv1.Deployment{
		ObjectMeta: v1.ObjectMeta{
			Namespace: l.Namespace,
			Name:      l.Name,
		},
	}
	err = c.client.Delete(context.TODO(), d)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}

	// Patch the legacy Service with helm annotations, So that it can be managed by the helm release.
	patch := []byte(fmt.Sprintf(helmPatch, l.Name, l.Namespace))
	err = c.client.Patch(context.Background(), &corev1.Service{
		ObjectMeta: v1.ObjectMeta{
			Namespace: l.Namespace,
			Name:      l.Name,
		},
	}, client.RawPatch(types.StrategicMergePatchType, patch))

	if err != nil {
		return nil, err
	}

	// Persist the legacy gateway as a real Gateway CRD.
	c.overrideDefaultValue(l, namespace)
	err = c.client.Create(context.TODO(), l)
	return l, err
}
|
||||
|
||||
// ListGateways returns Gateway CRDs plus legacy router Services (rendered as
// synthetic gateways by transform), filtered, sorted and paged per the query.
func (c *gatewayOperator) ListGateways(query *query.Query) (*api.ListResult, error) {
	gateways := v1alpha1.GatewayList{}
	err := c.cache.List(context.TODO(), &gateways, &client.ListOptions{LabelSelector: query.Selector()})
	if err != nil {
		return nil, err
	}
	var result []runtime.Object
	for i := range gateways.Items {
		result = append(result, &gateways.Items[i])
	}

	services := &corev1.ServiceList{}

	// filter legacy service by labels; a list failure deliberately yields no
	// legacy entries
	_ = c.client.List(context.TODO(), services, &client.ListOptions{
		LabelSelector: labels.SelectorFromSet(
			labels.Set{
				"app":       "kubesphere",
				"component": "ks-router",
				"tier":      "backend",
			}),
	})

	for i := range services.Items {
		result = append(result, &services.Items[i])
	}

	return v1alpha3.DefaultList(result, query, c.compare, c.filter, c.transform), nil
}
|
||||
|
||||
// transform enriches a listed object before it is returned: Gateways get a
// status refresh from their Service; legacy router Services are converted to
// synthetic Gateway objects. Any other type maps to nil.
func (c *gatewayOperator) transform(obj runtime.Object) runtime.Object {
	if g, ok := obj.(*v1alpha1.Gateway); ok {
		svc := &corev1.Service{}
		// The Service name is assumed to be identical to the gateway name.
		err := c.client.Get(context.TODO(), client.ObjectKeyFromObject(g), svc)
		if err != nil {
			klog.Info(err)
			return g
		}
		// NOTE: ":=" deliberately shadows g; the refreshed object is what
		// gets returned.
		g, err := c.updateStatus(g, svc)
		if err != nil {
			klog.Info(err)
		}
		return g

	}
	if svc, ok := obj.(*corev1.Service); ok {
		d := &appsv1.Deployment{}
		c.client.Get(context.TODO(), client.ObjectKeyFromObject(svc), d)
		// Legacy services record their project in the "project" label.
		g, err := c.updateStatus(c.convert(svc.Labels["project"], svc, d), svc)
		if err != nil {
			klog.Info(err)
		}
		return g
	}
	return nil
}
|
||||
|
||||
func (c *gatewayOperator) compare(left runtime.Object, right runtime.Object, field query.Field) bool {
|
||||
|
||||
leftGateway, ok := left.(*v1alpha1.Gateway)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
rightGateway, ok := right.(*v1alpha1.Gateway)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return v1alpha3.DefaultObjectMetaCompare(leftGateway.ObjectMeta, rightGateway.ObjectMeta, field)
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) filter(object runtime.Object, filter query.Filter) bool {
|
||||
var objMeta v1.ObjectMeta
|
||||
var namespace string
|
||||
|
||||
gateway, ok := object.(*v1alpha1.Gateway)
|
||||
if !ok {
|
||||
svc, ok := object.(*corev1.Service)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
namespace = svc.Labels["project"]
|
||||
objMeta = svc.ObjectMeta
|
||||
} else {
|
||||
namespace = gateway.Spec.Controller.Scope.Namespace
|
||||
objMeta = gateway.ObjectMeta
|
||||
}
|
||||
|
||||
switch filter.Field {
|
||||
case query.FieldNamespace:
|
||||
return strings.Compare(namespace, string(filter.Value)) == 0
|
||||
default:
|
||||
return v1alpha3.DefaultObjectMetaFilter(objMeta, filter)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) GetPods(namespace string, query *query.Query) (*api.ListResult, error) {
|
||||
podGetter := pod.New(c.factory.KubernetesSharedInformerFactory())
|
||||
|
||||
//TODO: move the selector string to options
|
||||
selector, err := labels.Parse(fmt.Sprintf("app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/instance=kubesphere-router-%s-ingress", namespace))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invaild selector config")
|
||||
}
|
||||
query.LabelSelector = selector.String()
|
||||
return podGetter.List(c.getWorkingNamespace(namespace), query)
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) GetPodLogs(ctx context.Context, namespace string, podName string, logOptions *corev1.PodLogOptions, responseWriter io.Writer) error {
|
||||
workingNamespace := c.getWorkingNamespace(namespace)
|
||||
|
||||
pods, err := c.GetPods(namespace, query.New())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !c.hasPod(pods.Items, types.NamespacedName{Namespace: workingNamespace, Name: podName}) {
|
||||
return fmt.Errorf("pod does not exist")
|
||||
}
|
||||
|
||||
podLogRequest := c.k8sclient.CoreV1().
|
||||
Pods(workingNamespace).
|
||||
GetLogs(podName, logOptions)
|
||||
reader, err := podLogRequest.Stream(context.TODO())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(responseWriter, reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) hasPod(slice []interface{}, key types.NamespacedName) bool {
|
||||
for _, s := range slice {
|
||||
pod, ok := s.(*corev1.Pod)
|
||||
if ok && client.ObjectKeyFromObject(pod) == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,978 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"kubesphere.io/api/gateway/v1alpha1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
|
||||
)
|
||||
|
||||
func Test_gatewayOperator_GetGateways(t *testing.T) {
|
||||
|
||||
type fields struct {
|
||||
client client.Client
|
||||
cache cache.Cache
|
||||
options *gateway.Options
|
||||
}
|
||||
type args struct {
|
||||
namespace string
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
corev1.AddToScheme(Scheme)
|
||||
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
client.Create(context.TODO(), &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project3",
|
||||
Namespace: "project3",
|
||||
},
|
||||
})
|
||||
|
||||
client.Create(context.TODO(), &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project4",
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
})
|
||||
|
||||
//nolint:staticcheck
|
||||
client2 := fake.NewFakeClientWithScheme(Scheme)
|
||||
create_GlobalGateway(client2)
|
||||
|
||||
//nolint:staticcheck
|
||||
client3 := fake.NewFakeClientWithScheme(Scheme)
|
||||
create_LegacyGateway(client3, "project6")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want []*v1alpha1.Gateway
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "return empty gateway list from watching namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "return empty gateway list from working namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get gateway from watching namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project3",
|
||||
},
|
||||
want: wantedResult("kubesphere-router-project3", "project3"),
|
||||
},
|
||||
{
|
||||
name: "get gateway from working namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project4",
|
||||
},
|
||||
want: wantedResult("kubesphere-router-project4", "kubesphere-controls-system"),
|
||||
},
|
||||
{
|
||||
name: "get global gateway",
|
||||
fields: fields{
|
||||
client: client2,
|
||||
cache: &fakeClient{Client: client2},
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project5",
|
||||
},
|
||||
want: wantedResult("kubesphere-router-kubesphere-system", "kubesphere-controls-system"),
|
||||
},
|
||||
{
|
||||
name: "get Legacy gateway",
|
||||
fields: fields{
|
||||
client: client3,
|
||||
cache: &fakeClient{Client: client3},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project6",
|
||||
},
|
||||
want: []*v1alpha1.Gateway{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, "project6"),
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project6",
|
||||
},
|
||||
},
|
||||
Service: v1alpha1.ServiceSpec{
|
||||
Annotations: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
Type: corev1.ServiceTypeNodePort,
|
||||
},
|
||||
},
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"loadBalancer\":{},\"service\":[{\"name\":\"http\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":0}]}\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
cache: tt.fields.cache,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.GetGateways(tt.args.namespace)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.GetGateways() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("gatewayOperator.GetGateways() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func wantedResult(name, namspace string) []*v1alpha1.Gateway {
|
||||
return []*v1alpha1.Gateway{
|
||||
{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
APIVersion: "gateway.kubesphere.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namspace,
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func create_GlobalGateway(c client.Client) *v1alpha1.Gateway {
|
||||
g := &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-kubesphere-system",
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
}
|
||||
_ = c.Create(context.TODO(), g)
|
||||
return g
|
||||
}
|
||||
|
||||
func create_LegacyGateway(c client.Client, namespace string) {
|
||||
s := &corev1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, namespace),
|
||||
Namespace: workingNamespace,
|
||||
Annotations: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"app": "kubesphere",
|
||||
"component": "ks-router",
|
||||
"tier": "backend",
|
||||
"project": namespace,
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http", Protocol: corev1.ProtocolTCP, Port: 80,
|
||||
},
|
||||
},
|
||||
Type: corev1.ServiceTypeNodePort,
|
||||
},
|
||||
}
|
||||
c.Create(context.TODO(), s)
|
||||
|
||||
d := &appsv1.Deployment{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, namespace),
|
||||
Namespace: workingNamespace,
|
||||
Annotations: map[string]string{
|
||||
SidecarInject: "true",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"app": "kubesphere",
|
||||
"component": "ks-router",
|
||||
"tier": "backend",
|
||||
"project": namespace,
|
||||
},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &[]int32{1}[0],
|
||||
},
|
||||
}
|
||||
c.Create(context.TODO(), d)
|
||||
}
|
||||
|
||||
func create_LegacyGatewayConfigMap(c client.Client, namespace string) {
|
||||
s := &corev1.ConfigMap{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, namespace, "-nginx"),
|
||||
Namespace: workingNamespace,
|
||||
Labels: map[string]string{
|
||||
"app": "kubesphere",
|
||||
"component": "ks-router",
|
||||
"tier": "backend",
|
||||
"project": namespace,
|
||||
},
|
||||
},
|
||||
Data: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
}
|
||||
c.Create(context.TODO(), s)
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_CreateGateway(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
options *gateway.Options
|
||||
cache cache.Cache
|
||||
}
|
||||
type args struct {
|
||||
namespace string
|
||||
obj *v1alpha1.Gateway
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
corev1.AddToScheme(Scheme)
|
||||
appsv1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want func(GatewayOperator, string) *v1alpha1.Gateway
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "creates gateway in watching namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project1",
|
||||
obj: &v1alpha1.Gateway{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
APIVersion: "gateway.kubesphere.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: func(o GatewayOperator, s string) *v1alpha1.Gateway {
|
||||
g, _ := o.GetGateways(s)
|
||||
return g[0]
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "creates gateway in working namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project2",
|
||||
obj: &v1alpha1.Gateway{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
APIVersion: "gateway.kubesphere.io/v1alpha1",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: func(o GatewayOperator, s string) *v1alpha1.Gateway {
|
||||
g, _ := o.GetGateways(s)
|
||||
return g[0]
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
cache: tt.fields.cache,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.CreateGateway(tt.args.namespace, tt.args.obj)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.CreateGateway() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
w := tt.want(c, tt.args.namespace)
|
||||
if !reflect.DeepEqual(got, w) {
|
||||
t.Errorf("gatewayOperator.CreateGateway() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(w, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_DeleteGateway(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
options *gateway.Options
|
||||
}
|
||||
type args struct {
|
||||
namespace string
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
client.Create(context.TODO(), &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project1",
|
||||
Namespace: "project1",
|
||||
},
|
||||
})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "delete gateway",
|
||||
fields: fields{
|
||||
client: client,
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "delete none exist gateway",
|
||||
fields: fields{
|
||||
client: client,
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project2",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
if err := c.DeleteGateway(tt.args.namespace); (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.DeleteGateway() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_UpdateGateway(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
options *gateway.Options
|
||||
}
|
||||
type args struct {
|
||||
namespace string
|
||||
obj *v1alpha1.Gateway
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
client.Create(context.TODO(), &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project3",
|
||||
Namespace: "project3",
|
||||
},
|
||||
})
|
||||
|
||||
obj := &v1alpha1.Gateway{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
APIVersion: "gateway.kubesphere.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project3",
|
||||
Namespace: "project3",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project3",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
want := obj.DeepCopy()
|
||||
want.ResourceVersion = "2"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want *v1alpha1.Gateway
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "update gateway from watching namespace",
|
||||
fields: fields{
|
||||
client: client,
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project3",
|
||||
obj: obj,
|
||||
},
|
||||
want: want,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.UpdateGateway(tt.args.namespace, tt.args.obj)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.UpdateGateway() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("gatewayOperator.UpdateGateway() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_UpgradeGateway(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
options *gateway.Options
|
||||
}
|
||||
type args struct {
|
||||
namespace string
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
corev1.AddToScheme(Scheme)
|
||||
appsv1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client2 := fake.NewFakeClientWithScheme(Scheme)
|
||||
create_LegacyGateway(client2, "project2")
|
||||
create_LegacyGatewayConfigMap(client2, "project2")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want *v1alpha1.Gateway
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no legacy gateway exists",
|
||||
fields: fields{
|
||||
client: client,
|
||||
options: &gateway.Options{
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project1",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "upgrade legacy gateway",
|
||||
fields: fields{
|
||||
client: client2,
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "project2",
|
||||
},
|
||||
want: &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project2",
|
||||
Namespace: "kubesphere-controls-system",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project2",
|
||||
},
|
||||
Config: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
},
|
||||
Service: v1alpha1.ServiceSpec{
|
||||
Annotations: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
Type: corev1.ServiceTypeNodePort,
|
||||
},
|
||||
Deployment: v1alpha1.DeploymentSpec{
|
||||
Replicas: &[]int32{1}[0],
|
||||
Annotations: map[string]string{
|
||||
"sidecar.istio.io/inject": "true",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.UpgradeGateway(tt.args.namespace)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.UpgradeGateway() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("gatewayOperator.UpgradeGateway() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_ListGateways(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
cache cache.Cache
|
||||
options *gateway.Options
|
||||
}
|
||||
type args struct {
|
||||
query *query.Query
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
corev1.AddToScheme(Scheme)
|
||||
appsv1.AddToScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
create_LegacyGateway(client, "project2")
|
||||
|
||||
client.Create(context.TODO(), &v1alpha1.Gateway{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "kubesphere-router-project1",
|
||||
Namespace: "project1",
|
||||
},
|
||||
})
|
||||
|
||||
gates := []*v1alpha1.Gateway{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, "project2"),
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
Spec: v1alpha1.GatewaySpec{
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "project2",
|
||||
},
|
||||
},
|
||||
Service: v1alpha1.ServiceSpec{
|
||||
Annotations: map[string]string{
|
||||
"fake": "true",
|
||||
},
|
||||
Type: corev1.ServiceTypeNodePort,
|
||||
},
|
||||
Deployment: v1alpha1.DeploymentSpec{
|
||||
Replicas: &[]int32{1}[0],
|
||||
Annotations: map[string]string{
|
||||
SidecarInject: "true",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"loadBalancer\":{},\"service\":[{\"name\":\"http\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":0}]}\n"),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: fmt.Sprint(gatewayPrefix, "project1"),
|
||||
Namespace: "project1",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
items := make([]interface{}, 0)
|
||||
for _, obj := range gates {
|
||||
items = append(items, obj)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want *api.ListResult
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "list all gateways",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
query: &query.Query{},
|
||||
},
|
||||
want: &api.ListResult{
|
||||
TotalItems: 2,
|
||||
Items: items,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
cache: tt.fields.cache,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.ListGateways(tt.args.query)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.ListGateways() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("gatewayOperator.ListGateways() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type fakeClient struct {
|
||||
Client client.Client
|
||||
}
|
||||
|
||||
// Get retrieves an obj for the given object key from the Kubernetes Cluster.
|
||||
// obj must be a struct pointer so that obj can be updated with the response
|
||||
// returned by the Server.
|
||||
func (f *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
|
||||
return f.Client.Get(ctx, key, obj, opts...)
|
||||
}
|
||||
|
||||
// List retrieves list of objects for a given namespace and list options. On a
|
||||
// successful call, Items field in the list will be populated with the
|
||||
// result returned from the server.
|
||||
func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
|
||||
return f.Client.List(ctx, list, opts...)
|
||||
}
|
||||
|
||||
// GetInformer fetches or constructs an informer for the given object that corresponds to a single
|
||||
// API kind and resource.
|
||||
func (f *fakeClient) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
|
||||
// of the underlying object.
|
||||
func (f *fakeClient) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (cache.Informer, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Start runs all the informers known to this cache until the context is closed.
|
||||
// It blocks.
|
||||
func (f *fakeClient) Start(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
|
||||
func (f *fakeClient) WaitForCacheSync(ctx context.Context) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *fakeClient) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_gatewayOperator_status(t *testing.T) {
|
||||
type fields struct {
|
||||
client client.Client
|
||||
cache cache.Cache
|
||||
options *gateway.Options
|
||||
}
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(Scheme)
|
||||
corev1.AddToScheme(Scheme)
|
||||
appsv1.AddToScheme(Scheme)
|
||||
|
||||
//nolint:staticcheck
|
||||
client := fake.NewFakeClientWithScheme(Scheme)
|
||||
//nolint:staticcheck
|
||||
client2 := fake.NewFakeClientWithScheme(Scheme)
|
||||
|
||||
fake := &corev1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "fake-node",
|
||||
Labels: map[string]string{
|
||||
MasterLabel: "",
|
||||
},
|
||||
},
|
||||
Status: corev1.NodeStatus{
|
||||
Addresses: []corev1.NodeAddress{
|
||||
{
|
||||
Type: corev1.NodeInternalIP,
|
||||
Address: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
client2.Create(context.TODO(), fake)
|
||||
|
||||
type args struct {
|
||||
gateway *v1alpha1.Gateway
|
||||
svc *corev1.Service
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want *v1alpha1.Gateway
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "default",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
gateway: &v1alpha1.Gateway{},
|
||||
svc: &corev1.Service{
|
||||
Spec: corev1.ServiceSpec{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http", Protocol: corev1.ProtocolTCP, Port: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &v1alpha1.Gateway{
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"loadBalancer\":{},\"service\":[{\"name\":\"http\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":0}]}\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default",
|
||||
fields: fields{
|
||||
client: client,
|
||||
cache: &fakeClient{Client: client},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
gateway: &v1alpha1.Gateway{
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"fake\":{}}"),
|
||||
},
|
||||
},
|
||||
svc: &corev1.Service{
|
||||
Spec: corev1.ServiceSpec{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http", Protocol: corev1.ProtocolTCP, Port: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &v1alpha1.Gateway{
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"fake\":{},\"loadBalancer\":{},\"service\":[{\"name\":\"http\",\"port\":80,\"protocol\":\"TCP\",\"targetPort\":0}]}"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Master Node IP",
|
||||
fields: fields{
|
||||
client: client2,
|
||||
cache: &fakeClient{Client: client2},
|
||||
options: &gateway.Options{
|
||||
Namespace: "kubesphere-controls-system",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
gateway: &v1alpha1.Gateway{},
|
||||
svc: &corev1.Service{
|
||||
Spec: corev1.ServiceSpec{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: "http", Protocol: corev1.ProtocolTCP, Port: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: &v1alpha1.Gateway{
|
||||
Status: runtime.RawExtension{
|
||||
Raw: []byte("{\"loadBalancer\":{\"ingress\":[{\"ip\":\"192.168.1.1\"}]},\"service\":[{\"name\":\"http\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":0}]}\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &gatewayOperator{
|
||||
client: tt.fields.client,
|
||||
cache: tt.fields.cache,
|
||||
options: tt.fields.options,
|
||||
}
|
||||
got, err := c.updateStatus(tt.args.gateway, tt.args.svc)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("gatewayOperator.status() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("gatewayOperator.status() has wrong object\nDiff:\n %s", diff.ObjectGoPrintSideBySide(tt.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,21 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package git
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/src-d/go-git.v4"
|
||||
"gopkg.in/src-d/go-git.v4/config"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
|
||||
"gopkg.in/src-d/go-git.v4/storage/memory"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
||||
"github.com/go-git/go-git/v5/storage/memory"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type AuthInfo struct {
|
||||
@@ -37,19 +28,20 @@ type GitVerifier interface {
|
||||
}
|
||||
|
||||
type gitVerifier struct {
|
||||
informers informers.SharedInformerFactory
|
||||
cache runtimeclient.Reader
|
||||
}
|
||||
|
||||
func NewGitVerifier(informers informers.SharedInformerFactory) GitVerifier {
|
||||
return &gitVerifier{informers: informers}
|
||||
func NewGitVerifier(cacheReader runtimeclient.Reader) GitVerifier {
|
||||
return &gitVerifier{cache: cacheReader}
|
||||
}
|
||||
|
||||
func (c *gitVerifier) VerifyGitCredential(remoteUrl, namespace, secretName string) error {
|
||||
var username, password string
|
||||
|
||||
if len(secretName) > 0 {
|
||||
secret, err := c.informers.Core().V1().Secrets().Lister().Secrets(namespace).Get(secretName)
|
||||
if err != nil {
|
||||
secret := &corev1.Secret{}
|
||||
if err := c.cache.Get(context.Background(),
|
||||
types.NamespacedName{Namespace: namespace, Name: secretName}, secret); err != nil {
|
||||
return err
|
||||
}
|
||||
usernameBytes, ok := secret.Data[corev1.BasicAuthUsernameKey]
|
||||
@@ -69,7 +61,6 @@ func (c *gitVerifier) VerifyGitCredential(remoteUrl, namespace, secretName strin
|
||||
|
||||
func (c *gitVerifier) gitReadVerifyWithBasicAuth(username string, password string, remote string) error {
|
||||
r, _ := git.Init(memory.NewStorage(), nil)
|
||||
|
||||
// Add a new remote, with the default fetch refspec
|
||||
origin, err := r.CreateRemote(&config.RemoteConfig{
|
||||
Name: git.DefaultRemoteName,
|
||||
@@ -78,6 +69,6 @@ func (c *gitVerifier) gitReadVerifyWithBasicAuth(username string, password strin
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = origin.List(&git.ListOptions{Auth: &http.BasicAuth{Username: string(username), Password: string(password)}})
|
||||
_, err = origin.List(&git.ListOptions{Auth: &http.BasicAuth{Username: username, Password: password}})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,23 +1,16 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package git
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
runtimefakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/scheme"
|
||||
)
|
||||
|
||||
func TestGitReadVerifyWithBasicAuth(t *testing.T) {
|
||||
@@ -50,7 +43,11 @@ func TestGitReadVerifyWithBasicAuth(t *testing.T) {
|
||||
"remote": "git@fdsfs41342`@@@2414!!!!github.com:kubesphere/kubesphere.git",
|
||||
},
|
||||
}
|
||||
verifier := gitVerifier{informers: nil}
|
||||
client := runtimefakeclient.NewClientBuilder().
|
||||
WithScheme(scheme.Scheme).
|
||||
Build()
|
||||
|
||||
verifier := gitVerifier{cache: client}
|
||||
|
||||
for _, item := range shouldSuccess {
|
||||
err := verifier.gitReadVerifyWithBasicAuth(item["username"], item["password"], item["remote"])
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package group
|
||||
|
||||
@@ -21,48 +10,45 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
tenantv1alpha1 "kubesphere.io/api/tenant/v1alpha1"
|
||||
"k8s.io/utils/ptr"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
"kubesphere.io/api/tenant/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
resourcesv1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
)
|
||||
|
||||
type GroupOperator interface {
|
||||
ListGroups(workspace string, queryParam *query.Query) (*api.ListResult, error)
|
||||
CreateGroup(workspace string, namespace *iamv1alpha2.Group) (*iamv1alpha2.Group, error)
|
||||
DescribeGroup(workspace, group string) (*iamv1alpha2.Group, error)
|
||||
CreateGroup(workspace string, namespace *iamv1beta1.Group) (*iamv1beta1.Group, error)
|
||||
DescribeGroup(workspace, group string) (*iamv1beta1.Group, error)
|
||||
DeleteGroup(workspace, group string) error
|
||||
UpdateGroup(workspace string, group *iamv1alpha2.Group) (*iamv1alpha2.Group, error)
|
||||
PatchGroup(workspace string, group *iamv1alpha2.Group) (*iamv1alpha2.Group, error)
|
||||
UpdateGroup(workspace string, group *iamv1beta1.Group) (*iamv1beta1.Group, error)
|
||||
PatchGroup(workspace string, group *iamv1beta1.Group) (*iamv1beta1.Group, error)
|
||||
DeleteGroupBinding(workspace, name string) error
|
||||
CreateGroupBinding(workspace, groupName, userName string) (*iamv1alpha2.GroupBinding, error)
|
||||
CreateGroupBinding(workspace, groupName, userName string) (*iamv1beta1.GroupBinding, error)
|
||||
ListGroupBindings(workspace string, queryParam *query.Query) (*api.ListResult, error)
|
||||
}
|
||||
|
||||
type groupOperator struct {
|
||||
k8sclient kubernetes.Interface
|
||||
ksclient kubesphere.Interface
|
||||
resourceGetter *resourcesv1alpha3.ResourceGetter
|
||||
client runtimeclient.Client
|
||||
resourceGetter *resourcesv1alpha3.Getter
|
||||
}
|
||||
|
||||
func New(informers informers.InformerFactory, ksclient kubesphere.Interface, k8sclient kubernetes.Interface) GroupOperator {
|
||||
func New(cacheClient runtimeclient.Client, k8sVersion *semver.Version) GroupOperator {
|
||||
return &groupOperator{
|
||||
resourceGetter: resourcesv1alpha3.NewResourceGetter(informers, nil),
|
||||
k8sclient: k8sclient,
|
||||
ksclient: ksclient,
|
||||
resourceGetter: resourcesv1alpha3.NewResourceGetter(cacheClient, k8sVersion),
|
||||
client: cacheClient,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,7 +56,7 @@ func (t *groupOperator) ListGroups(workspace string, queryParam *query.Query) (*
|
||||
|
||||
if workspace != "" {
|
||||
// filter by workspace
|
||||
queryParam.Filters[query.FieldLabel] = query.Value(fmt.Sprintf("%s=%s", tenantv1alpha1.WorkspaceLabel, workspace))
|
||||
queryParam.Filters[query.FieldLabel] = query.Value(fmt.Sprintf("%s=%s", v1beta1.WorkspaceLabel, workspace))
|
||||
}
|
||||
|
||||
result, err := t.resourceGetter.List("groups", "", queryParam)
|
||||
@@ -82,10 +68,10 @@ func (t *groupOperator) ListGroups(workspace string, queryParam *query.Query) (*
|
||||
}
|
||||
|
||||
// CreateGroup adds a workspace label to group which indicates group is under the workspace
|
||||
func (t *groupOperator) CreateGroup(workspace string, group *iamv1alpha2.Group) (*iamv1alpha2.Group, error) {
|
||||
func (t *groupOperator) CreateGroup(workspace string, group *iamv1beta1.Group) (*iamv1beta1.Group, error) {
|
||||
|
||||
if group.GenerateName == "" {
|
||||
err := errors.NewInvalid(iamv1alpha2.SchemeGroupVersion.WithKind(iamv1alpha2.ResourcePluralGroup).GroupKind(),
|
||||
err := errors.NewInvalid(iamv1beta1.SchemeGroupVersion.WithKind(iamv1beta1.ResourcePluralGroup).GroupKind(),
|
||||
"", []*field.Error{field.Required(field.NewPath("metadata.generateName"), "generateName is required")})
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
@@ -95,13 +81,14 @@ func (t *groupOperator) CreateGroup(workspace string, group *iamv1alpha2.Group)
|
||||
if unique, err := t.isGenerateNameUnique(workspace, group.GenerateName); err != nil {
|
||||
return nil, err
|
||||
} else if !unique {
|
||||
err = errors.NewConflict(iamv1alpha2.Resource(iamv1alpha2.ResourcePluralGroup),
|
||||
err = errors.NewConflict(iamv1beta1.Resource(iamv1beta1.ResourcePluralGroup),
|
||||
group.GenerateName, fmt.Errorf("a group named %s already exists in the workspace", group.GenerateName))
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t.ksclient.IamV1alpha2().Groups().Create(context.Background(), labelGroupWithWorkspaceName(group, workspace), metav1.CreateOptions{})
|
||||
group = labelGroupWithWorkspaceName(group, workspace)
|
||||
return group, t.client.Create(context.Background(), group)
|
||||
}
|
||||
|
||||
func (t *groupOperator) isGenerateNameUnique(workspace, generateName string) (bool, error) {
|
||||
@@ -113,7 +100,7 @@ func (t *groupOperator) isGenerateNameUnique(workspace, generateName string) (bo
|
||||
return false, err
|
||||
}
|
||||
for _, obj := range result.Items {
|
||||
g := obj.(*iamv1alpha2.Group)
|
||||
g := obj.(*iamv1beta1.Group)
|
||||
if g.GenerateName == generateName {
|
||||
return false, err
|
||||
}
|
||||
@@ -121,13 +108,13 @@ func (t *groupOperator) isGenerateNameUnique(workspace, generateName string) (bo
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (t *groupOperator) DescribeGroup(workspace, group string) (*iamv1alpha2.Group, error) {
|
||||
func (t *groupOperator) DescribeGroup(workspace, group string) (*iamv1beta1.Group, error) {
|
||||
obj, err := t.resourceGetter.Get("groups", "", group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ns := obj.(*iamv1alpha2.Group)
|
||||
if ns.Labels[tenantv1alpha1.WorkspaceLabel] != workspace {
|
||||
ns := obj.(*iamv1beta1.Group)
|
||||
if ns.Labels[v1beta1.WorkspaceLabel] != workspace {
|
||||
err := errors.NewNotFound(corev1.Resource("group"), group)
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
@@ -135,73 +122,72 @@ func (t *groupOperator) DescribeGroup(workspace, group string) (*iamv1alpha2.Gro
|
||||
return ns, nil
|
||||
}
|
||||
|
||||
func (t *groupOperator) DeleteGroup(workspace, group string) error {
|
||||
_, err := t.DescribeGroup(workspace, group)
|
||||
func (t *groupOperator) DeleteGroup(workspace, groupName string) error {
|
||||
group, err := t.DescribeGroup(workspace, groupName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return t.ksclient.IamV1alpha2().Groups().Delete(context.Background(), group, *metav1.NewDeleteOptions(0))
|
||||
return t.client.Delete(context.Background(), group, &runtimeclient.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)})
|
||||
}
|
||||
|
||||
func (t *groupOperator) UpdateGroup(workspace string, group *iamv1alpha2.Group) (*iamv1alpha2.Group, error) {
|
||||
func (t *groupOperator) UpdateGroup(workspace string, group *iamv1beta1.Group) (*iamv1beta1.Group, error) {
|
||||
_, err := t.DescribeGroup(workspace, group.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
group = labelGroupWithWorkspaceName(group, workspace)
|
||||
return t.ksclient.IamV1alpha2().Groups().Update(context.Background(), group, metav1.UpdateOptions{})
|
||||
return group, t.client.Update(context.Background(), group)
|
||||
}
|
||||
|
||||
func (t *groupOperator) PatchGroup(workspace string, group *iamv1alpha2.Group) (*iamv1alpha2.Group, error) {
|
||||
_, err := t.DescribeGroup(workspace, group.Name)
|
||||
func (t *groupOperator) PatchGroup(workspace string, group *iamv1beta1.Group) (*iamv1beta1.Group, error) {
|
||||
group, err := t.DescribeGroup(workspace, group.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if group.Labels != nil {
|
||||
group.Labels[tenantv1alpha1.WorkspaceLabel] = workspace
|
||||
group.Labels[v1beta1.WorkspaceLabel] = workspace
|
||||
}
|
||||
data, err := json.Marshal(group)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t.ksclient.IamV1alpha2().Groups().Patch(context.Background(), group.Name, types.MergePatchType, data, metav1.PatchOptions{})
|
||||
return group, t.client.Patch(context.Background(), group, runtimeclient.RawPatch(types.MergePatchType, data))
|
||||
}
|
||||
|
||||
func (t *groupOperator) DeleteGroupBinding(workspace, name string) error {
|
||||
obj, err := t.resourceGetter.Get("groupbindings", "", name)
|
||||
if err != nil {
|
||||
groupBinding := &iamv1beta1.GroupBinding{}
|
||||
if err := t.client.Get(context.Background(), types.NamespacedName{Name: name}, groupBinding); err != nil {
|
||||
return err
|
||||
}
|
||||
ns := obj.(*iamv1alpha2.GroupBinding)
|
||||
if ns.Labels[tenantv1alpha1.WorkspaceLabel] != workspace {
|
||||
err := errors.NewNotFound(corev1.Resource("groupbinding"), name)
|
||||
if groupBinding.Labels[v1beta1.WorkspaceLabel] != workspace {
|
||||
err := errors.NewNotFound(corev1.Resource("groupbindings"), name)
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return t.ksclient.IamV1alpha2().GroupBindings().Delete(context.Background(), name, *metav1.NewDeleteOptions(0))
|
||||
return t.client.Delete(context.Background(), groupBinding, &runtimeclient.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)})
|
||||
}
|
||||
|
||||
func (t *groupOperator) CreateGroupBinding(workspace, groupName, userName string) (*iamv1alpha2.GroupBinding, error) {
|
||||
func (t *groupOperator) CreateGroupBinding(workspace, groupName, userName string) (*iamv1beta1.GroupBinding, error) {
|
||||
|
||||
groupBinding := iamv1alpha2.GroupBinding{
|
||||
groupBinding := &iamv1beta1.GroupBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("%s-%s-", groupName, userName),
|
||||
Labels: map[string]string{
|
||||
iamv1alpha2.UserReferenceLabel: userName,
|
||||
iamv1alpha2.GroupReferenceLabel: groupName,
|
||||
tenantv1alpha1.WorkspaceLabel: workspace,
|
||||
iamv1beta1.UserReferenceLabel: userName,
|
||||
iamv1beta1.GroupReferenceLabel: groupName,
|
||||
v1beta1.WorkspaceLabel: workspace,
|
||||
},
|
||||
},
|
||||
Users: []string{userName},
|
||||
GroupRef: iamv1alpha2.GroupRef{
|
||||
APIGroup: iamv1alpha2.SchemeGroupVersion.Group,
|
||||
Kind: iamv1alpha2.ResourcePluralGroup,
|
||||
GroupRef: iamv1beta1.GroupRef{
|
||||
APIGroup: iamv1beta1.SchemeGroupVersion.Group,
|
||||
Kind: iamv1beta1.ResourcePluralGroup,
|
||||
Name: groupName,
|
||||
},
|
||||
}
|
||||
|
||||
return t.ksclient.IamV1alpha2().GroupBindings().Create(context.Background(), &groupBinding, metav1.CreateOptions{})
|
||||
return groupBinding, t.client.Create(context.Background(), groupBinding)
|
||||
}
|
||||
|
||||
func (t *groupOperator) ListGroupBindings(workspace string, query *query.Query) (*api.ListResult, error) {
|
||||
@@ -212,7 +198,7 @@ func (t *groupOperator) ListGroupBindings(workspace string, query *query.Query)
|
||||
return nil, err
|
||||
}
|
||||
// workspace resources must be filtered by workspace
|
||||
wsSelector := labels.Set{tenantv1alpha1.WorkspaceLabel: workspace}
|
||||
wsSelector := labels.Set{v1beta1.WorkspaceLabel: workspace}
|
||||
query.LabelSelector = labels.Merge(lableSelector, wsSelector).String()
|
||||
|
||||
result, err := t.resourceGetter.List("groupbindings", "", query)
|
||||
@@ -225,12 +211,12 @@ func (t *groupOperator) ListGroupBindings(workspace string, query *query.Query)
|
||||
|
||||
// labelGroupWithWorkspaceName adds a kubesphere.io/workspace=[workspaceName] label to namespace which
|
||||
// indicates namespace is under the workspace
|
||||
func labelGroupWithWorkspaceName(namespace *iamv1alpha2.Group, workspaceName string) *iamv1alpha2.Group {
|
||||
func labelGroupWithWorkspaceName(namespace *iamv1beta1.Group, workspaceName string) *iamv1beta1.Group {
|
||||
if namespace.Labels == nil {
|
||||
namespace.Labels = make(map[string]string, 0)
|
||||
}
|
||||
|
||||
namespace.Labels[tenantv1alpha1.WorkspaceLabel] = workspaceName // label namespace with workspace name
|
||||
namespace.Labels[v1beta1.WorkspaceLabel] = workspaceName // label namespace with workspace name
|
||||
|
||||
return namespace
|
||||
}
|
||||
|
||||
@@ -1,69 +1,59 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package im
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
"k8s.io/utils/ptr"
|
||||
iamv1beta1 "kubesphere.io/api/iam/v1beta1"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
"kubesphere.io/kubesphere/pkg/models/auth"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
resourcev1beta1 "kubesphere.io/kubesphere/pkg/models/resources/v1beta1"
|
||||
)
|
||||
|
||||
type IdentityManagementInterface interface {
|
||||
CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error)
|
||||
CreateUser(user *iamv1beta1.User) (*iamv1beta1.User, error)
|
||||
ListUsers(query *query.Query) (*api.ListResult, error)
|
||||
DeleteUser(username string) error
|
||||
UpdateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error)
|
||||
DescribeUser(username string) (*iamv1alpha2.User, error)
|
||||
UpdateUser(user *iamv1beta1.User) (*iamv1beta1.User, error)
|
||||
DescribeUser(username string) (*iamv1beta1.User, error)
|
||||
ModifyPassword(username string, password string) error
|
||||
ListLoginRecords(username string, query *query.Query) (*api.ListResult, error)
|
||||
PasswordVerify(username string, password string) error
|
||||
}
|
||||
|
||||
func NewOperator(ksClient kubesphere.Interface, userGetter resources.Interface, loginRecordGetter resources.Interface, options *authentication.Options) IdentityManagementInterface {
|
||||
func NewOperator(client runtimeclient.Client, resourceManager resourcev1beta1.ResourceManager, options *authentication.Options) IdentityManagementInterface {
|
||||
im := &imOperator{
|
||||
ksClient: ksClient,
|
||||
userGetter: userGetter,
|
||||
loginRecordGetter: loginRecordGetter,
|
||||
options: options,
|
||||
client: client,
|
||||
options: options,
|
||||
resourceManager: resourceManager,
|
||||
}
|
||||
return im
|
||||
}
|
||||
|
||||
type imOperator struct {
|
||||
ksClient kubesphere.Interface
|
||||
userGetter resources.Interface
|
||||
loginRecordGetter resources.Interface
|
||||
options *authentication.Options
|
||||
client runtimeclient.Client
|
||||
resourceManager resourcev1beta1.ResourceManager
|
||||
options *authentication.Options
|
||||
}
|
||||
|
||||
// UpdateUser returns user information after update.
|
||||
func (im *imOperator) UpdateUser(new *iamv1alpha2.User) (*iamv1alpha2.User, error) {
|
||||
func (im *imOperator) UpdateUser(new *iamv1beta1.User) (*iamv1beta1.User, error) {
|
||||
old, err := im.fetch(new.Name)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
@@ -73,109 +63,108 @@ func (im *imOperator) UpdateUser(new *iamv1alpha2.User) (*iamv1alpha2.User, erro
|
||||
new.Spec.EncryptedPassword = old.Spec.EncryptedPassword
|
||||
status := old.Status
|
||||
// only support enable or disable
|
||||
if new.Status.State == iamv1alpha2.UserDisabled || new.Status.State == iamv1alpha2.UserActive {
|
||||
if new.Status.State == iamv1beta1.UserDisabled || new.Status.State == iamv1beta1.UserActive {
|
||||
status.State = new.Status.State
|
||||
status.LastTransitionTime = &metav1.Time{Time: time.Now()}
|
||||
}
|
||||
new.Status = status
|
||||
updated, err := im.ksClient.IamV1alpha2().Users().Update(context.Background(), new, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if err := im.client.Update(context.Background(), new); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ensurePasswordNotOutput(updated), nil
|
||||
new = new.DeepCopy()
|
||||
new.Spec.EncryptedPassword = ""
|
||||
return new, nil
|
||||
}
|
||||
|
||||
func (im *imOperator) fetch(username string) (*iamv1alpha2.User, error) {
|
||||
obj, err := im.userGetter.Get("", username)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
func (im *imOperator) fetch(username string) (*iamv1beta1.User, error) {
|
||||
user := &iamv1beta1.User{}
|
||||
if err := im.client.Get(context.Background(), types.NamespacedName{Name: username}, user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user := obj.(*iamv1alpha2.User).DeepCopy()
|
||||
return user, nil
|
||||
return user.DeepCopy(), nil
|
||||
}
|
||||
|
||||
func (im *imOperator) ModifyPassword(username string, password string) error {
|
||||
user, err := im.fetch(username)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
user.Spec.EncryptedPassword = password
|
||||
_, err = im.ksClient.IamV1alpha2().Users().Update(context.Background(), user, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if err := im.client.Update(context.Background(), user); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (im *imOperator) ListUsers(query *query.Query) (result *api.ListResult, err error) {
|
||||
result, err = im.userGetter.List("", query)
|
||||
func (im *imOperator) ListUsers(query *query.Query) (*api.ListResult, error) {
|
||||
result, err := im.resourceManager.ListResources(context.Background(), iamv1beta1.SchemeGroupVersion.WithResource(iamv1beta1.ResourcesPluralUser), "", query)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
items := make([]interface{}, 0)
|
||||
for _, item := range result.Items {
|
||||
user := item.(*iamv1alpha2.User)
|
||||
out := ensurePasswordNotOutput(user)
|
||||
items := make([]runtime.Object, 0)
|
||||
userList := result.(*iamv1beta1.UserList)
|
||||
for _, item := range userList.Items {
|
||||
out := item.DeepCopy()
|
||||
out.Spec.EncryptedPassword = ""
|
||||
items = append(items, out)
|
||||
}
|
||||
result.Items = items
|
||||
return result, nil
|
||||
total, err := strconv.ParseInt(userList.GetContinue(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &api.ListResult{Items: items, TotalItems: int(total)}, nil
|
||||
}
|
||||
|
||||
func (im *imOperator) PasswordVerify(username string, password string) error {
|
||||
obj, err := im.userGetter.Get("", username)
|
||||
user, err := im.fetch(username)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
user := obj.(*iamv1alpha2.User)
|
||||
if err = auth.PasswordVerify(user.Spec.EncryptedPassword, password); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (im *imOperator) DescribeUser(username string) (*iamv1alpha2.User, error) {
|
||||
obj, err := im.userGetter.Get("", username)
|
||||
func (im *imOperator) DescribeUser(username string) (*iamv1beta1.User, error) {
|
||||
user, err := im.fetch(username)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
user := obj.(*iamv1alpha2.User)
|
||||
return ensurePasswordNotOutput(user), nil
|
||||
out := user.DeepCopy()
|
||||
out.Spec.EncryptedPassword = ""
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (im *imOperator) DeleteUser(username string) error {
|
||||
return im.ksClient.IamV1alpha2().Users().Delete(context.Background(), username, *metav1.NewDeleteOptions(0))
|
||||
user, err := im.fetch(username)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return im.client.Delete(context.Background(), user, &runtimeclient.DeleteOptions{GracePeriodSeconds: ptr.To[int64](0)})
|
||||
}
|
||||
|
||||
func (im *imOperator) CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
|
||||
user, err := im.ksClient.IamV1alpha2().Users().Create(context.Background(), user, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
func (im *imOperator) CreateUser(user *iamv1beta1.User) (*iamv1beta1.User, error) {
|
||||
if err := im.client.Create(context.Background(), user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return user, nil
|
||||
}
|
||||
|
||||
func (im *imOperator) ListLoginRecords(username string, q *query.Query) (*api.ListResult, error) {
|
||||
q.Filters[query.FieldLabel] = query.Value(fmt.Sprintf("%s=%s", iamv1alpha2.UserReferenceLabel, username))
|
||||
result, err := im.loginRecordGetter.List("", q)
|
||||
q.Filters[query.FieldLabel] = query.Value(fmt.Sprintf("%s=%s", iamv1beta1.UserReferenceLabel, username))
|
||||
result, err := im.resourceManager.ListResources(context.Background(), iamv1beta1.SchemeGroupVersion.WithResource(iamv1beta1.ResourcesPluralLoginRecord), "", q)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func ensurePasswordNotOutput(user *iamv1alpha2.User) *iamv1alpha2.User {
|
||||
out := user.DeepCopy()
|
||||
// ensure encrypted password will not be output
|
||||
out.Spec.EncryptedPassword = ""
|
||||
return out
|
||||
items := make([]runtime.Object, 0)
|
||||
userList := result.(*iamv1beta1.LoginRecordList)
|
||||
for _, item := range userList.Items {
|
||||
items = append(items, item.DeepCopy())
|
||||
}
|
||||
total, err := strconv.ParseInt(userList.GetContinue(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &api.ListResult{Items: items, TotalItems: int(total)}, nil
|
||||
}
|
||||
|
||||
@@ -1,17 +1,6 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package im
|
||||
|
||||
@@ -1,349 +1,72 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
corev1listers "k8s.io/client-go/listers/core/v1"
|
||||
|
||||
certificatesv1 "k8s.io/api/certificates/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/utils/pkiutil"
|
||||
)
|
||||
|
||||
const (
|
||||
inClusterCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
configMapPrefix = "kubeconfig-"
|
||||
kubeconfigNameFormat = configMapPrefix + "%s"
|
||||
defaultClusterName = "local"
|
||||
defaultNamespace = "default"
|
||||
kubeconfigFileName = "config"
|
||||
configMapKind = "ConfigMap"
|
||||
configMapAPIVersion = "v1"
|
||||
privateKeyAnnotation = "kubesphere.io/private-key"
|
||||
residual = 72 * time.Hour
|
||||
ConfigTypeKubeConfig = "kubeconfig"
|
||||
SecretTypeKubeConfig = "config.kubesphere.io/" + ConfigTypeKubeConfig
|
||||
FileName = "config"
|
||||
DefaultClusterName = "local"
|
||||
DefaultNamespace = "default"
|
||||
InClusterCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
PrivateKeyAnnotation = "kubesphere.io/private-key"
|
||||
UserKubeConfigSecretNameFormat = "kubeconfig-%s"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
GetKubeConfig(username string) (string, error)
|
||||
CreateKubeConfig(user *iamv1alpha2.User) error
|
||||
UpdateKubeconfig(username string, csr *certificatesv1.CertificateSigningRequest) error
|
||||
GetKubeConfig(ctx context.Context, username string) (string, error)
|
||||
}
|
||||
|
||||
type operator struct {
|
||||
k8sClient kubernetes.Interface
|
||||
configMapLister corev1listers.ConfigMapLister
|
||||
config *rest.Config
|
||||
masterURL string
|
||||
reader runtimeclient.Reader
|
||||
masterURL string
|
||||
}
|
||||
|
||||
func NewOperator(k8sClient kubernetes.Interface, configMapLister corev1listers.ConfigMapLister, config *rest.Config) Interface {
|
||||
return &operator{k8sClient: k8sClient, configMapLister: configMapLister, config: config}
|
||||
}
|
||||
|
||||
func NewReadOnlyOperator(configMapLister corev1listers.ConfigMapLister, masterURL string) Interface {
|
||||
return &operator{configMapLister: configMapLister, masterURL: masterURL}
|
||||
}
|
||||
|
||||
// CreateKubeConfig Create kubeconfig configmap in KubeSphereControlNamespace for the specified user
|
||||
func (o *operator) CreateKubeConfig(user *iamv1alpha2.User) error {
|
||||
configName := fmt.Sprintf(kubeconfigNameFormat, user.Name)
|
||||
cm, err := o.configMapLister.ConfigMaps(constants.KubeSphereControlNamespace).Get(configName)
|
||||
// already exist and cert will not expire in 3 days
|
||||
if err == nil && !isExpired(cm, user.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// internal error
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// create a new CSR
|
||||
var ca []byte
|
||||
if len(o.config.CAData) > 0 {
|
||||
ca = o.config.CAData
|
||||
} else {
|
||||
ca, err = os.ReadFile(inClusterCAFilePath)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = o.createCSR(user.Name); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
currentContext := fmt.Sprintf("%s@%s", user.Name, defaultClusterName)
|
||||
config := clientcmdapi.Config{
|
||||
Kind: configMapKind,
|
||||
APIVersion: configMapAPIVersion,
|
||||
Preferences: clientcmdapi.Preferences{},
|
||||
Clusters: map[string]*clientcmdapi.Cluster{defaultClusterName: {
|
||||
Server: o.config.Host,
|
||||
InsecureSkipTLSVerify: false,
|
||||
CertificateAuthorityData: ca,
|
||||
}},
|
||||
Contexts: map[string]*clientcmdapi.Context{currentContext: {
|
||||
Cluster: defaultClusterName,
|
||||
AuthInfo: user.Name,
|
||||
Namespace: defaultNamespace,
|
||||
}},
|
||||
CurrentContext: currentContext,
|
||||
}
|
||||
|
||||
kubeconfig, err := clientcmd.Write(config)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// update configmap if it already exist.
|
||||
if cm != nil {
|
||||
cm.Data = map[string]string{kubeconfigFileName: string(kubeconfig)}
|
||||
if _, err = o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a new config
|
||||
cm = &corev1.ConfigMap{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: configMapKind,
|
||||
APIVersion: configMapAPIVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
Labels: map[string]string{constants.UsernameLabelKey: user.Name},
|
||||
},
|
||||
Data: map[string]string{kubeconfigFileName: string(kubeconfig)},
|
||||
}
|
||||
|
||||
if err = controllerutil.SetControllerReference(user, cm, scheme.Scheme); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Create(context.Background(), cm, metav1.CreateOptions{}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
func NewReadOnlyOperator(reader runtimeclient.Reader, masterURL string) Interface {
|
||||
return &operator{reader: reader, masterURL: masterURL}
|
||||
}
|
||||
|
||||
// GetKubeConfig returns kubeconfig data for the specified user
|
||||
func (o *operator) GetKubeConfig(username string) (string, error) {
|
||||
configName := fmt.Sprintf(kubeconfigNameFormat, username)
|
||||
configMap, err := o.configMapLister.ConfigMaps(constants.KubeSphereControlNamespace).Get(configName)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
func (o *operator) GetKubeConfig(ctx context.Context, username string) (string, error) {
|
||||
secretName := fmt.Sprintf(UserKubeConfigSecretNameFormat, username)
|
||||
|
||||
secret := &corev1.Secret{}
|
||||
if err := o.reader.Get(ctx,
|
||||
types.NamespacedName{Namespace: constants.KubeSphereNamespace, Name: secretName}, secret); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data := []byte(configMap.Data[kubeconfigFileName])
|
||||
data := secret.Data[FileName]
|
||||
kubeconfig, err := clientcmd.Load(data)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
masterURL := o.masterURL
|
||||
// server host override
|
||||
if cluster := kubeconfig.Clusters[defaultClusterName]; cluster != nil && masterURL != "" {
|
||||
if cluster := kubeconfig.Clusters[DefaultClusterName]; cluster != nil && masterURL != "" {
|
||||
cluster.Server = masterURL
|
||||
}
|
||||
|
||||
data, err = clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func (o *operator) createCSR(username string) error {
|
||||
csrConfig := &certutil.Config{
|
||||
CommonName: username,
|
||||
Organization: nil,
|
||||
AltNames: certutil.AltNames{},
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
}
|
||||
|
||||
x509csr, x509key, err := pkiutil.NewCSRAndKey(csrConfig)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var csrBuffer, keyBuffer bytes.Buffer
|
||||
if err = pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var csrBytes []byte
|
||||
if csrBytes, err = x509.CreateCertificateRequest(rand.Reader, x509csr, x509key); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err = pem.Encode(&csrBuffer, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
csr := csrBuffer.Bytes()
|
||||
key := keyBuffer.Bytes()
|
||||
csrName := fmt.Sprintf("%s-csr-%d", username, time.Now().Unix())
|
||||
k8sCSR := &certificatesv1.CertificateSigningRequest{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "CertificateSigningRequest",
|
||||
APIVersion: "certificates.k8s.io/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: csrName,
|
||||
Labels: map[string]string{constants.UsernameLabelKey: username},
|
||||
Annotations: map[string]string{privateKeyAnnotation: string(key)},
|
||||
},
|
||||
Spec: certificatesv1.CertificateSigningRequestSpec{
|
||||
Request: csr,
|
||||
SignerName: certificatesv1.KubeAPIServerClientSignerName,
|
||||
Usages: []certificatesv1.KeyUsage{certificatesv1.UsageKeyEncipherment, certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature},
|
||||
Username: username,
|
||||
Groups: []string{user.AllAuthenticated},
|
||||
},
|
||||
}
|
||||
|
||||
// create csr
|
||||
if _, err = o.k8sClient.CertificatesV1().CertificateSigningRequests().Create(context.Background(), k8sCSR, metav1.CreateOptions{}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateKubeconfig Update client key and client certificate after CertificateSigningRequest has been approved
|
||||
func (o *operator) UpdateKubeconfig(username string, csr *certificatesv1.CertificateSigningRequest) error {
|
||||
configName := fmt.Sprintf(kubeconfigNameFormat, username)
|
||||
configMap, err := o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Get(context.Background(), configName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
configMap = applyCert(configMap, csr)
|
||||
_, err = o.k8sClient.CoreV1().ConfigMaps(constants.KubeSphereControlNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func applyCert(cm *corev1.ConfigMap, csr *certificatesv1.CertificateSigningRequest) *corev1.ConfigMap {
|
||||
data := []byte(cm.Data[kubeconfigFileName])
|
||||
kubeconfig, err := clientcmd.Load(data)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return cm
|
||||
}
|
||||
|
||||
username := getControlledUsername(cm)
|
||||
privateKey := csr.Annotations[privateKeyAnnotation]
|
||||
clientCert := csr.Status.Certificate
|
||||
kubeconfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
|
||||
username: {
|
||||
ClientKeyData: []byte(privateKey),
|
||||
ClientCertificateData: clientCert,
|
||||
},
|
||||
}
|
||||
|
||||
data, err = clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return cm
|
||||
}
|
||||
|
||||
cm.Data[kubeconfigFileName] = string(data)
|
||||
return cm
|
||||
}
|
||||
|
||||
func getControlledUsername(cm *corev1.ConfigMap) string {
|
||||
for _, ownerReference := range cm.OwnerReferences {
|
||||
if ownerReference.Kind == iamv1alpha2.ResourceKindUser {
|
||||
return ownerReference.Name
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// isExpired returns whether the client certificate in kubeconfig is expired
|
||||
func isExpired(cm *corev1.ConfigMap, username string) bool {
|
||||
data := []byte(cm.Data[kubeconfigFileName])
|
||||
kubeconfig, err := clientcmd.Load(data)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return true
|
||||
}
|
||||
authInfo, ok := kubeconfig.AuthInfos[username]
|
||||
if ok {
|
||||
clientCert, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return true
|
||||
}
|
||||
for _, cert := range clientCert {
|
||||
if cert.NotAfter.Before(time.Now().Add(residual)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
//ignore the kubeconfig, since it's not approved yet.
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package kubeconfig
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
certificatesv1 "k8s.io/api/certificates/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
k8sinformers "k8s.io/client-go/informers"
|
||||
k8sfake "k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
const fakeKubeConfig = `
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1EWXlPVEEzTXpBME5Wb1hEVE14TURZeU56QTNNekEwTlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHFKCk52NnRiWWdyampJbFliSkZDWFNkaVNjYWxuckE2cGNEakQya2tBaW1RNXlkNEdrV0QwcTQ0WEpGSFdxcjRsWkwKTkJJSjlQSUFNNzVWRVVWcjh6NFNOVVBmckEvVERtSTZhaTlQVGNYOWtlOGFCZzV1U0dsbS9LUkZlcVVwVXA3awozUkp5MjlVcGNYb2pITm1EY0FPaWFLRi9NbnliZG1pV0lmcUJHaVZMSEdhcmdleTZCVzgrTGVNR3NWV0lpWVhVCkUwK3F0MG96R0lJaUNhVC9CaEdwNHlLczVWT0NheWRjNStaUnppYUJQMTk1Q3JqRllJNVR0UHMzb3JBcGhVSzcKZmd3NjFSZWhsMHQyd0x6bFFLSjM4RXJSNlUzMGwwR3h0MzhRTTVwbkt3cTQvOFBvbjkxYTlaNE1Dc3J6aDVYegpnbXZ4RmFyS0kxMWNQclRwaCtjQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZPa3NZMHExdEFUL3RUZ1JldG1kVHNDamN2Nm9NQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBNXhuQngrdDZickMxNWttSHdLemdId09RZDlvNHpwR2orcDdmb2x5RnlEempuMDFOMwp4YXpwUUF4TU1UVHNtVjJMWWFyVm9KOU4xOGlmVndQV01HNEoyTGc3TTFBVUFKVU1BdmVYU0cveVY5eGx2QUtlCkdsek0wRSs5Y1IxR1cxL1hQcHc0ZWpmYWM0T2hlN09XUEhDcVVFVHJ0eWlTcmJGcWU3dmNLbS82dGlhQWphclUKMllzOGMzbjAyZUdKc1B4RUVwazVjRC9WQUxNOWlCUzJZQnBCanc0dDdHWTFERWtya2xsNkx1R0VtS05GRVBKOQpLOHFIYTQ2TFVTT3pNS1NLM2xndFIxQ2ZpSTBJZFBhdUQ5eGdaZ0VqZGdkcloxTHhYT01RTXlmOGl1Z0ZWblQvCmcyU0pjSEQ4QUZLQmwrUEZJdExuTVhBcEh1aUd2SkVLNzg1NQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
server: https://192.168.0.3:6443
|
||||
name: cluster.local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: cluster.local
|
||||
user: kubernetes-admin
|
||||
name: kubernetes-admin@cluster.local
|
||||
current-context: kubernetes-admin@cluster.local
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: kubernetes-admin
|
||||
user:
|
||||
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lJSkE2T3o0VitnTTR3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TVRBMk1qa3dOek13TkRWYUZ3MHlNakEyTWprd056TXdORGhhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTBDaVd3OFl3cUpuT1FNU2kKQk9tZy9CeldseXZtU2dRU25iSlFyelZ3dUpYNWd2c0ZidFkwRmZwRFhqOTBvblg0UnRlelJWV3BtdHRrVEJXQwpzeHZqcitENUU0Rk9oV3AySHNsM1V3NWdTdVk0KzhvYWV3ZUwrRXRLL0FIeFlnL3Q5SE9reGhwRi9iYmVSTzhvCkN1NmRiT1dMZFpvTmN5RDlUaEgwZC8rZy9CakwwbklHQ0tpNk4rRloyQk5ZRkMxMWhmaitPUm1WRTdnTmQwYkQKVlp6YXYvOXVoZmljWUlBQ0FYa2d5NU5EWHY4enFXQ2NZY0VwbWppZ1RtZGFaV3N2c0F5QUh6c1gzS1JaNHU2VgpVbktqY09jWFdaZ1RqSE5xb3pjUEw4cEszQVBsbndsTThYcXd1S1JILzdnREVTNWRGaEZRdFdRMjB5TEVtNkNHCkJ5ekRTd0lEQVFBQm8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0h3WURWUjBqQkJnd0ZvQVU2U3hqU3JXMEJQKzFPQkY2MloxT3dLTnkvcWd3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFMY1J5Q3RGY2cwc0JIUFNibm9kQ21ralhUaFFKRFRDRWNObDNOMVJySWlZYncxRTUyV0hoZURkCll2OFBiK0hoeHRtYzNETzJSV2V5MWhJT2NGL1JHRE11bXpYMkJRMThuSU5zRk9ZTzF5ejNEamdsQ1RHdVdqejYKcmc4ZEZBWjVxMzhxT1pQYjF6RE1sWVZIdGQ2QVR3eFRxbjZhL3N3RXdsYVo1ME5JMzBCNTJMTXNYWWVJSlJ3NQpEUlZ3KzhVR3l3dDgwU3YxU3dvamRMd3dWcHhCc0lYemJBNXJjR3B6by9jayt2ZDI0Yys3bzYvVGJJV0hmVWxNCloyMzBobGNGS2t1OU8wb2habEVYVGpOQTVQcUdSdG5ieXlsaEdOWWxHaUVMQTQvK1Z6ZWZ0YXJoMmwvL1E4d3EKRElNTlJmazBwQTBTb21IUWl4d1FlTktCRDBYd3ZZRT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
|
||||
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMENpV3c4WXdxSm5PUU1TaUJPbWcvQnpXbHl2bVNnUVNuYkpRcnpWd3VKWDVndnNGCmJ0WTBGZnBEWGo5MG9uWDRSdGV6UlZXcG10dGtUQldDc3h2anIrRDVFNEZPaFdwMkhzbDNVdzVnU3VZNCs4b2EKZXdlTCtFdEsvQUh4WWcvdDlIT2t4aHBGL2JiZVJPOG9DdTZkYk9XTGRab05jeUQ5VGhIMGQvK2cvQmpMMG5JRwpDS2k2TitGWjJCTllGQzExaGZqK09SbVZFN2dOZDBiRFZaemF2Lzl1aGZpY1lJQUNBWGtneTVORFh2OHpxV0NjClljRXBtamlnVG1kYVpXc3ZzQXlBSHpzWDNLUlo0dTZWVW5LamNPY1hXWmdUakhOcW96Y1BMOHBLM0FQbG53bE0KOFhxd3VLUkgvN2dERVM1ZEZoRlF0V1EyMHlMRW02Q0dCeXpEU3dJREFRQUJBb0lCQUhyU0NDc1pyS28rbmprUApESDRUajc1U0ViZisyaEdBRjYvZWY4YnhwRUgxazlSWjRwbkVYOVU3NWpZZEFPZSs3YkIzSXpyYzBZY2l2aW82Cll2VGxsdEcyejZCWG9vb01DQWdnWFh5dk5kZmJ3WEdualRwY2VKVVhiL1lEKzNZZDZneGJrN1NqMmZwYXhRa3QKaDVYenR3V0M1MmVMYnpZb0YrM1JvRXFSbFY1SS9zTEZRVndMNmhEZkF2cnJXUTBHajVJZmZtNUxqeTR3VFdmQQpCWkRidEVWWVVZVkdqVWZ3ZndFTUpoUjlaZmk3OUhBNVgxZ0xLMWkxTDJNdTdzZzl5YUoxMDRubVpuQW5Fc2ViCkRDRnlJbHBCMUM1Tlh4Qm0rSGtCL2JVMkd0OTdwVXVHR2h4RkRtTmJlTyt4NS92Wm1Ha0tvTEE1V242NWM3RG8KeWJUV3hGRUNnWUVBNmNyV3BmT0Nhb29TWmc1eFBCSHpwbkd5d29VN0t0UTJGcURxc0I1djdIOFpTbGxhS0VaNQpGZkUwRCszcUFXeWtmTTQ0bm54Uk5xN1hOR3Rwa1c0MFhRREg3V2JyTjdVYUV4TjJCSnVXUUNWbEtiaG8yMUszCkp1K0lUaDQ5bUlPSWkrcUFFWHVxZ2hLaHpkQXZsZGF0L2YwVThHK0srQVJoT3dOalJJcTdEN01DZ1lFQTQrNW8KOWdEekI1eWtPQ1NBVEw2ckYyTmFjUGlLaUdZSEoyV1dPY0RHeGFvSUs2VUk4WjUrY0VmSHFDbG9TSFFjem9yVApyYnAzM1R0eFhNbzhhQlZNc243Q0NNU25FMmxsT1NsWllxNGV6NEh6bStVSU92WVJ0cCtuTnk0SUJOU2ZRUE5wCnR5b2I3VjZqSXZTanpiOVNEQzVOakpweFFYU0txbStBWC9ZMjhna0NnWUVBb29HZHBnaVhWRnJZNHh1UzFnQmMKYmd1R0IvUDM1cE5QYlhjNDZtYWR3Yk91N3FFaEsvR2daUUllQUJ5TmxhUGd5ZWZHTDFPV1YvNDhGSEc5Rlp1Vwp4amF1d1hQU2VBeG9MVzVQa0hCZGhnVDRSb0dxVVJrenVkcXgwaXJ2QWI0Y0FiVmtnOEtFQ0puTzRuS2RRUGZTClJVUFBkRGowVGVVdGVJbW9USkpwNkVVQ2dZRUF6SHlibGZpTUVJd3JtR0xHNkJNM0U2aUMvMDg3bWR0UEY3MC8KNVZoWi9BUHJpSnhyUmJuWDNZdklSOG0rVVNJNnBlSk92bEhJTDZhZ3NZcU9YeUtjeUphSUphMm41dlpyWmJqLwpCRlVLTjBoeThhMnNrSmtxa3hqd3Y4U0FWVFVjR3YxR0hwbWNySHgzQjJsTGU4N2xJU0I1V21kRXJHQ045eEpKCnJjNEt4V0VDZ1lFQTV6NUR2QlFxdzc2bXh
EQ3lUd05HcUh3MVFqZmYwdXdTNmU0cGNVV1M0MVdjT3dlUm9NNGwKakZWSXlUNVRUbXd1QTBlS0VtMXZiM000VU5TeHN0eG14WDlhbVg2RzRlSSt4Uy94QmdHbTMyTWNMVzl5NjQyZwpSaDhScjVETTdpUlI4VzZFWVpnMStLR2sxcHAvRmxFWFBlWVhnY3hzK01NV3NTNmhaK0YzQzU4PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
|
||||
`
|
||||
|
||||
func Test_operator_CreateKubeConfig(t *testing.T) {
|
||||
config, err := clientcmd.RESTConfigFromKubeConfig([]byte(fakeKubeConfig))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
k8sClient := k8sfake.NewSimpleClientset()
|
||||
k8sInformers := k8sinformers.NewSharedInformerFactory(k8sClient, 0)
|
||||
operator := NewOperator(k8sClient, k8sInformers.Core().V1().ConfigMaps().Lister(), config)
|
||||
|
||||
user1 := &iamv1alpha2.User{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: iamv1alpha2.ResourceKindUser,
|
||||
APIVersion: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "user1",
|
||||
},
|
||||
}
|
||||
|
||||
if err := operator.CreateKubeConfig(user1); err != nil {
|
||||
t.Errorf("CreateKubeConfig() unexpected error %v", err)
|
||||
}
|
||||
|
||||
if len(k8sClient.Actions()) != 2 {
|
||||
t.Errorf("CreateKubeConfig() unexpected action %v", k8sClient.Actions())
|
||||
return
|
||||
}
|
||||
|
||||
csrCreateAction, ok := k8sClient.Actions()[0].(k8stesting.CreateActionImpl)
|
||||
if !ok {
|
||||
t.Errorf("CreateKubeConfig() unexpected action %v", k8sClient.Actions()[0])
|
||||
return
|
||||
}
|
||||
|
||||
csr, ok := csrCreateAction.Object.(*certificatesv1.CertificateSigningRequest)
|
||||
if !ok {
|
||||
t.Errorf("CreateKubeConfig() unexpected object %v", csrCreateAction.Object)
|
||||
return
|
||||
}
|
||||
if csr.Labels[constants.UsernameLabelKey] != user1.Name || csr.Annotations[privateKeyAnnotation] == "" {
|
||||
t.Errorf("CreateKubeConfig() unexpected CertificateSigningRequest %v", csr)
|
||||
return
|
||||
}
|
||||
cmCreateAction := k8sClient.Actions()[1].(k8stesting.CreateActionImpl)
|
||||
if !ok {
|
||||
t.Errorf("CreateKubeConfig() unexpected action %v", k8sClient.Actions()[1])
|
||||
return
|
||||
}
|
||||
cm, ok := cmCreateAction.Object.(*corev1.ConfigMap)
|
||||
if !ok {
|
||||
t.Errorf("CreateKubeConfig() unexpected object %v", cmCreateAction.Object)
|
||||
return
|
||||
}
|
||||
if cm.Labels[constants.UsernameLabelKey] != user1.Name || len(cm.Data) == 0 {
|
||||
t.Errorf("CreateKubeConfig() unexpected ConfigMap %v", cm)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,188 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubectl
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
appsv1informers "k8s.io/client-go/informers/apps/v1"
|
||||
coreinfomers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
iamv1alpha2informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
const (
|
||||
namespace = constants.KubeSphereControlNamespace
|
||||
deployNameFormat = "kubectl-%s"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
GetKubectlPod(username string) (models.PodInfo, error)
|
||||
CreateKubectlDeploy(username string, owner metav1.Object) error
|
||||
}
|
||||
|
||||
type operator struct {
|
||||
k8sClient kubernetes.Interface
|
||||
deploymentInformer appsv1informers.DeploymentInformer
|
||||
podInformer coreinfomers.PodInformer
|
||||
userInformer iamv1alpha2informers.UserInformer
|
||||
kubectlImage string
|
||||
}
|
||||
|
||||
func NewOperator(k8sClient kubernetes.Interface, deploymentInformer appsv1informers.DeploymentInformer,
|
||||
podInformer coreinfomers.PodInformer, userInformer iamv1alpha2informers.UserInformer, kubectlImage string) Interface {
|
||||
return &operator{k8sClient: k8sClient, deploymentInformer: deploymentInformer, podInformer: podInformer,
|
||||
userInformer: userInformer, kubectlImage: kubectlImage}
|
||||
}
|
||||
|
||||
func (o *operator) GetKubectlPod(username string) (models.PodInfo, error) {
|
||||
deployName := fmt.Sprintf(deployNameFormat, username)
|
||||
deploy, err := o.deploymentInformer.Lister().Deployments(namespace).Get(deployName)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return models.PodInfo{}, err
|
||||
}
|
||||
|
||||
selectors := deploy.Spec.Selector.MatchLabels
|
||||
labelSelector := labels.Set(selectors).AsSelector()
|
||||
pods, err := o.podInformer.Lister().Pods(namespace).List(labelSelector)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return models.PodInfo{}, err
|
||||
}
|
||||
|
||||
pod, err := selectCorrectPod(namespace, pods)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return models.PodInfo{}, err
|
||||
}
|
||||
|
||||
info := models.PodInfo{Namespace: pod.Namespace, Pod: pod.Name, Container: pod.Status.ContainerStatuses[0].Name}
|
||||
|
||||
return info, nil
|
||||
|
||||
}
|
||||
|
||||
func selectCorrectPod(namespace string, pods []*v1.Pod) (kubectlPod *v1.Pod, err error) {
|
||||
|
||||
var kubectlPodList []*v1.Pod
|
||||
for _, pod := range pods {
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == "Ready" && condition.Status == "True" {
|
||||
kubectlPodList = append(kubectlPodList, pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(kubectlPodList) < 1 {
|
||||
err = fmt.Errorf("cannot find valid kubectl pod in namespace:%s", namespace)
|
||||
return &v1.Pod{}, err
|
||||
}
|
||||
|
||||
random := rand.Intn(len(kubectlPodList))
|
||||
|
||||
return kubectlPodList[random], nil
|
||||
}
|
||||
|
||||
func (o *operator) CreateKubectlDeploy(username string, owner metav1.Object) error {
|
||||
deployName := fmt.Sprintf(deployNameFormat, username)
|
||||
|
||||
_, err := o.userInformer.Lister().Get(username)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
// ignore if user not exist
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
replica := int32(1)
|
||||
selector := metav1.LabelSelector{MatchLabels: map[string]string{constants.UsernameLabelKey: username}}
|
||||
deployment := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deployName,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replica,
|
||||
Selector: &selector,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
constants.UsernameLabelKey: username,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "kubectl",
|
||||
Image: o.kubectlImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "host-time",
|
||||
MountPath: "/etc/localtime",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ServiceAccountName: "kubesphere-cluster-admin",
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "host-time",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: "/etc/localtime",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// bind the lifecycle of role binding
|
||||
err = controllerutil.SetControllerReference(owner, deployment, scheme.Scheme)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = o.k8sClient.AppsV1().Deployments(namespace).Create(context.Background(), deployment, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/logging"
|
||||
)
|
||||
|
||||
type LoggingOperator interface {
|
||||
GetCurrentStats(sf logging.SearchFilter) (v1alpha2.APIResponse, error)
|
||||
CountLogsByInterval(sf logging.SearchFilter, interval string) (v1alpha2.APIResponse, error)
|
||||
ExportLogs(sf logging.SearchFilter, w io.Writer) error
|
||||
SearchLogs(sf logging.SearchFilter, from, size int64, order string) (v1alpha2.APIResponse, error)
|
||||
}
|
||||
|
||||
type loggingOperator struct {
|
||||
c logging.Client
|
||||
}
|
||||
|
||||
func NewLoggingOperator(client logging.Client) LoggingOperator {
|
||||
return &loggingOperator{client}
|
||||
}
|
||||
|
||||
func (l loggingOperator) GetCurrentStats(sf logging.SearchFilter) (v1alpha2.APIResponse, error) {
|
||||
res, err := l.c.GetCurrentStats(sf)
|
||||
return v1alpha2.APIResponse{Statistics: &res}, err
|
||||
}
|
||||
|
||||
func (l loggingOperator) CountLogsByInterval(sf logging.SearchFilter, interval string) (v1alpha2.APIResponse, error) {
|
||||
res, err := l.c.CountLogsByInterval(sf, interval)
|
||||
return v1alpha2.APIResponse{Histogram: &res}, err
|
||||
}
|
||||
|
||||
func (l loggingOperator) ExportLogs(sf logging.SearchFilter, w io.Writer) error {
|
||||
return l.c.ExportLogs(sf, w)
|
||||
}
|
||||
|
||||
func (l loggingOperator) SearchLogs(sf logging.SearchFilter, from, size int64, order string) (v1alpha2.APIResponse, error) {
|
||||
res, err := l.c.SearchLogs(sf, from, size, order)
|
||||
return v1alpha2.APIResponse{Logs: &res}, err
|
||||
}
|
||||
@@ -1,381 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package metering
|
||||
|
||||
type PriceInfo struct {
|
||||
// currency unit, currently support CNY and USD
|
||||
Currency string `json:"currency" description:"currency"`
|
||||
// cpu cost with above currency unit for per core per hour
|
||||
CpuPerCorePerHour float64 `json:"cpu_per_core_per_hour,omitempty" description:"cpu price"`
|
||||
// mem cost with above currency unit for per GB per hour
|
||||
MemPerGigabytesPerHour float64 `json:"mem_per_gigabytes_per_hour,omitempty" description:"mem price"`
|
||||
// ingress network traffic cost with above currency unit for per MB per hour
|
||||
IngressNetworkTrafficPerMegabytesPerHour float64 `json:"ingress_network_traffic_per_megabytes_per_hour,omitempty" description:"ingress price"`
|
||||
// egress network traffice cost with above currency unit for per MB per hour
|
||||
EgressNetworkTrafficPerMegabytesPerHour float64 `json:"egress_network_traffic_per_megabytes_per_hour,omitempty" description:"egress price"`
|
||||
// pvc cost with above currency unit for per GB per hour
|
||||
PvcPerGigabytesPerHour float64 `json:"pvc_per_gigabytes_per_hour,omitempty" description:"pvc price"`
|
||||
}
|
||||
|
||||
type PriceResponse struct {
|
||||
RetentionDay string `json:"retention_day"`
|
||||
PriceInfo `json:",inline"`
|
||||
}
|
||||
|
||||
type PodStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" desription:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
}
|
||||
|
||||
type PodsStats map[string]*PodStatistic
|
||||
|
||||
func (ps *PodsStats) Set(podName, meterName string, value float64) {
|
||||
if _, ok := (*ps)[podName]; !ok {
|
||||
(*ps)[podName] = &PodStatistic{}
|
||||
}
|
||||
switch meterName {
|
||||
case "meter_pod_cpu_usage":
|
||||
(*ps)[podName].CPUUsage = value
|
||||
case "meter_pod_memory_usage_wo_cache":
|
||||
(*ps)[podName].MemoryUsageWoCache = value
|
||||
case "meter_pod_net_bytes_transmitted":
|
||||
(*ps)[podName].NetBytesTransmitted = value
|
||||
case "meter_pod_net_bytes_received":
|
||||
(*ps)[podName].NetBytesReceived = value
|
||||
case "meter_pod_pvc_bytes_total":
|
||||
(*ps)[podName].PVCBytesTotal = value
|
||||
}
|
||||
}
|
||||
|
||||
type OpenPitrixStatistic struct {
|
||||
AppStatistic
|
||||
}
|
||||
|
||||
type AppStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Deploys map[string]*DeploymentStatistic `json:"deployments" description:"deployment statistic"`
|
||||
Statefulsets map[string]*StatefulsetStatistic `json:"statefulsets" description:"statefulset statistic"`
|
||||
Daemonsets map[string]*DaemonsetStatistic `json:"daemonsets" description:"daemonsets statistics"`
|
||||
}
|
||||
|
||||
func (as *AppStatistic) GetDeployStats(name string) *DeploymentStatistic {
|
||||
if as.Deploys == nil {
|
||||
as.Deploys = make(map[string]*DeploymentStatistic)
|
||||
}
|
||||
if as.Deploys[name] == nil {
|
||||
as.Deploys[name] = &DeploymentStatistic{}
|
||||
}
|
||||
return as.Deploys[name]
|
||||
}
|
||||
|
||||
func (as *AppStatistic) GetDaemonStats(name string) *DaemonsetStatistic {
|
||||
if as.Daemonsets == nil {
|
||||
as.Daemonsets = make(map[string]*DaemonsetStatistic)
|
||||
}
|
||||
if as.Daemonsets[name] == nil {
|
||||
as.Daemonsets[name] = &DaemonsetStatistic{}
|
||||
}
|
||||
return as.Daemonsets[name]
|
||||
}
|
||||
|
||||
func (as *AppStatistic) GetStatefulsetStats(name string) *StatefulsetStatistic {
|
||||
if as.Statefulsets == nil {
|
||||
as.Statefulsets = make(map[string]*StatefulsetStatistic)
|
||||
}
|
||||
if as.Statefulsets[name] == nil {
|
||||
as.Statefulsets[name] = &StatefulsetStatistic{}
|
||||
}
|
||||
return as.Statefulsets[name]
|
||||
}
|
||||
|
||||
func (as *AppStatistic) Aggregate() {
|
||||
if as.Deploys == nil && as.Statefulsets == nil && as.Daemonsets == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// aggregate deployment stats
|
||||
for _, deployObj := range as.Deploys {
|
||||
for _, podObj := range deployObj.Pods {
|
||||
deployObj.CPUUsage += podObj.CPUUsage
|
||||
deployObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
|
||||
deployObj.NetBytesTransmitted += podObj.NetBytesTransmitted
|
||||
deployObj.NetBytesReceived += podObj.NetBytesReceived
|
||||
deployObj.PVCBytesTotal += podObj.PVCBytesTotal
|
||||
}
|
||||
as.CPUUsage += deployObj.CPUUsage
|
||||
as.MemoryUsageWoCache += deployObj.MemoryUsageWoCache
|
||||
as.NetBytesTransmitted += deployObj.NetBytesTransmitted
|
||||
as.NetBytesReceived += deployObj.NetBytesReceived
|
||||
as.PVCBytesTotal += deployObj.PVCBytesTotal
|
||||
}
|
||||
|
||||
// aggregate statfulset stats
|
||||
for _, statfulObj := range as.Statefulsets {
|
||||
for _, podObj := range statfulObj.Pods {
|
||||
statfulObj.CPUUsage += podObj.CPUUsage
|
||||
statfulObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
|
||||
statfulObj.NetBytesTransmitted += podObj.NetBytesTransmitted
|
||||
statfulObj.NetBytesReceived += podObj.NetBytesReceived
|
||||
statfulObj.PVCBytesTotal += podObj.PVCBytesTotal
|
||||
}
|
||||
as.CPUUsage += statfulObj.CPUUsage
|
||||
as.MemoryUsageWoCache += statfulObj.MemoryUsageWoCache
|
||||
as.NetBytesTransmitted += statfulObj.NetBytesTransmitted
|
||||
as.NetBytesReceived += statfulObj.NetBytesReceived
|
||||
as.PVCBytesTotal += statfulObj.PVCBytesTotal
|
||||
}
|
||||
|
||||
// aggregate daemonset stats
|
||||
for _, daemonsetObj := range as.Daemonsets {
|
||||
for _, podObj := range daemonsetObj.Pods {
|
||||
daemonsetObj.CPUUsage += podObj.CPUUsage
|
||||
daemonsetObj.MemoryUsageWoCache += podObj.MemoryUsageWoCache
|
||||
daemonsetObj.NetBytesTransmitted += podObj.NetBytesTransmitted
|
||||
daemonsetObj.NetBytesReceived += podObj.NetBytesReceived
|
||||
daemonsetObj.PVCBytesTotal += podObj.PVCBytesTotal
|
||||
}
|
||||
as.CPUUsage += daemonsetObj.CPUUsage
|
||||
as.MemoryUsageWoCache += daemonsetObj.MemoryUsageWoCache
|
||||
as.NetBytesTransmitted += daemonsetObj.NetBytesTransmitted
|
||||
as.NetBytesReceived += daemonsetObj.NetBytesReceived
|
||||
as.PVCBytesTotal += daemonsetObj.PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
type ServiceStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" desription:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) SetPodStats(name string, podStat *PodStatistic) {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ss.Pods[name] = podStat
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ss.Pods[name] == nil {
|
||||
ss.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ss.Pods[name]
|
||||
}
|
||||
|
||||
func (ss *ServiceStatistic) Aggregate() {
|
||||
if ss.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ss.Pods {
|
||||
ss.CPUUsage += ss.GetPodStats(key).CPUUsage
|
||||
ss.MemoryUsageWoCache += ss.GetPodStats(key).MemoryUsageWoCache
|
||||
ss.NetBytesTransmitted += ss.GetPodStats(key).NetBytesTransmitted
|
||||
ss.NetBytesReceived += ss.GetPodStats(key).NetBytesReceived
|
||||
}
|
||||
}
|
||||
|
||||
type DeploymentStatistic struct {
|
||||
CPUUsage float64 `json:"cpu_usage" description:"cpu_usage"`
|
||||
MemoryUsageWoCache float64 `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
|
||||
NetBytesTransmitted float64 `json:"net_bytes_transmitted" desciption:"net_bytes_transmitted"`
|
||||
NetBytesReceived float64 `json:"net_bytes_received" description:"net_bytes_received"`
|
||||
PVCBytesTotal float64 `json:"pvc_bytes_total" description:"pvc_bytes_total"`
|
||||
Pods map[string]*PodStatistic `json:"pods" description:"pod statistic"`
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ds.Pods[name] == nil {
|
||||
ds.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ds.Pods[name]
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) SetPodStats(name string, podStat *PodStatistic) {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ds.Pods[name] = podStat
|
||||
}
|
||||
|
||||
func (ds *DeploymentStatistic) Aggregate() {
|
||||
if ds.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ds.Pods {
|
||||
ds.CPUUsage += ds.GetPodStats(key).CPUUsage
|
||||
ds.MemoryUsageWoCache += ds.GetPodStats(key).MemoryUsageWoCache
|
||||
ds.NetBytesTransmitted += ds.GetPodStats(key).NetBytesTransmitted
|
||||
ds.NetBytesReceived += ds.GetPodStats(key).NetBytesReceived
|
||||
ds.PVCBytesTotal += ds.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
// StatefulsetStatistic aggregates resource usage for a single statefulset,
// accumulated from the statistics of its pods.
type StatefulsetStatistic struct {
	CPUUsage            float64                  `json:"cpu_usage" description:"cpu_usage"`
	MemoryUsageWoCache  float64                  `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
	NetBytesTransmitted float64                  `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
	NetBytesReceived    float64                  `json:"net_bytes_received" description:"net_bytes_received"`
	PVCBytesTotal       float64                  `json:"pvc_bytes_total" description:"pvc_bytes_total"`
	Pods                map[string]*PodStatistic `json:"pods" description:"pod statistic"`
}
|
||||
|
||||
func (ss *StatefulsetStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ss.Pods[name] == nil {
|
||||
ss.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ss.Pods[name]
|
||||
}
|
||||
|
||||
func (ss *StatefulsetStatistic) SetPodStats(name string, podStat *PodStatistic) {
|
||||
if ss.Pods == nil {
|
||||
ss.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ss.Pods[name] = podStat
|
||||
}
|
||||
|
||||
func (ss *StatefulsetStatistic) Aggregate() {
|
||||
if ss.Pods == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for key := range ss.Pods {
|
||||
ss.CPUUsage += ss.GetPodStats(key).CPUUsage
|
||||
ss.MemoryUsageWoCache += ss.GetPodStats(key).MemoryUsageWoCache
|
||||
ss.NetBytesTransmitted += ss.GetPodStats(key).NetBytesTransmitted
|
||||
ss.NetBytesReceived += ss.GetPodStats(key).NetBytesReceived
|
||||
ss.PVCBytesTotal += ss.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
// DaemonsetStatistic aggregates resource usage for a single daemonset,
// accumulated from the statistics of its pods.
type DaemonsetStatistic struct {
	CPUUsage            float64                  `json:"cpu_usage" description:"cpu_usage"`
	MemoryUsageWoCache  float64                  `json:"memory_usage_wo_cache" description:"memory_usage_wo_cache"`
	NetBytesTransmitted float64                  `json:"net_bytes_transmitted" description:"net_bytes_transmitted"`
	NetBytesReceived    float64                  `json:"net_bytes_received" description:"net_bytes_received"`
	PVCBytesTotal       float64                  `json:"pvc_bytes_total" description:"pvc_bytes_total"`
	Pods                map[string]*PodStatistic `json:"pods" description:"pod statistic"`
}
|
||||
|
||||
func (ds *DaemonsetStatistic) GetPodStats(name string) *PodStatistic {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
if ds.Pods[name] == nil {
|
||||
ds.Pods[name] = &PodStatistic{}
|
||||
}
|
||||
return ds.Pods[name]
|
||||
}
|
||||
|
||||
func (ds *DaemonsetStatistic) SetPodStats(name string, podStat *PodStatistic) {
|
||||
if ds.Pods == nil {
|
||||
ds.Pods = make(map[string]*PodStatistic)
|
||||
}
|
||||
ds.Pods[name] = podStat
|
||||
}
|
||||
|
||||
func (ds *DaemonsetStatistic) Aggregate() {
|
||||
if ds.Pods == nil {
|
||||
return
|
||||
}
|
||||
for key := range ds.Pods {
|
||||
ds.CPUUsage += ds.GetPodStats(key).CPUUsage
|
||||
ds.MemoryUsageWoCache += ds.GetPodStats(key).MemoryUsageWoCache
|
||||
ds.NetBytesTransmitted += ds.GetPodStats(key).NetBytesTransmitted
|
||||
ds.NetBytesReceived += ds.GetPodStats(key).NetBytesReceived
|
||||
ds.PVCBytesTotal += ds.GetPodStats(key).PVCBytesTotal
|
||||
}
|
||||
}
|
||||
|
||||
// ResourceStatistic groups per-kind usage statistics for everything in a
// scope (e.g. a namespace): OpenPitrix releases, app CRDs, and the plain
// Kubernetes workloads that belong to neither.
type ResourceStatistic struct {
	// openpitrix statistic
	OpenPitrixs map[string]*OpenPitrixStatistic `json:"openpitrixs" description:"openpitrix statistic"`

	// app crd statistic
	Apps map[string]*AppStatistic `json:"apps" description:"app statistic"`

	// k8s workload only which exclude app and op
	Deploys      map[string]*DeploymentStatistic  `json:"deployments" description:"deployment statistic"`
	Statefulsets map[string]*StatefulsetStatistic `json:"statefulsets" description:"statefulset statistic"`
	Daemonsets   map[string]*DaemonsetStatistic   `json:"daemonsets" description:"daemonsets statistics"`
}
|
||||
|
||||
func (rs *ResourceStatistic) GetOpenPitrixStats(name string) *OpenPitrixStatistic {
|
||||
if rs.OpenPitrixs == nil {
|
||||
rs.OpenPitrixs = make(map[string]*OpenPitrixStatistic)
|
||||
}
|
||||
if rs.OpenPitrixs[name] == nil {
|
||||
rs.OpenPitrixs[name] = &OpenPitrixStatistic{}
|
||||
}
|
||||
return rs.OpenPitrixs[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetAppStats(name string) *AppStatistic {
|
||||
if rs.Apps == nil {
|
||||
rs.Apps = make(map[string]*AppStatistic)
|
||||
}
|
||||
if rs.Apps[name] == nil {
|
||||
rs.Apps[name] = &AppStatistic{}
|
||||
}
|
||||
return rs.Apps[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetDeployStats(name string) *DeploymentStatistic {
|
||||
if rs.Deploys == nil {
|
||||
rs.Deploys = make(map[string]*DeploymentStatistic)
|
||||
}
|
||||
if rs.Deploys[name] == nil {
|
||||
rs.Deploys[name] = &DeploymentStatistic{}
|
||||
}
|
||||
return rs.Deploys[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetStatefulsetStats(name string) *StatefulsetStatistic {
|
||||
if rs.Statefulsets == nil {
|
||||
rs.Statefulsets = make(map[string]*StatefulsetStatistic)
|
||||
}
|
||||
if rs.Statefulsets[name] == nil {
|
||||
rs.Statefulsets[name] = &StatefulsetStatistic{}
|
||||
}
|
||||
return rs.Statefulsets[name]
|
||||
}
|
||||
|
||||
func (rs *ResourceStatistic) GetDaemonsetStats(name string) *DaemonsetStatistic {
|
||||
if rs.Daemonsets == nil {
|
||||
rs.Daemonsets = make(map[string]*DaemonsetStatistic)
|
||||
}
|
||||
if rs.Daemonsets[name] == nil {
|
||||
rs.Daemonsets[name] = &DaemonsetStatistic{}
|
||||
}
|
||||
return rs.Daemonsets[name]
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package expressions
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/prom-label-proxy/injectproxy"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
)
|
||||
|
||||
// init registers the Prometheus namespace-enforcement implementation so it
// can be looked up via ReplaceNamespaceFns["prometheus"].
func init() {
	register("prometheus", labelReplace)
}
|
||||
|
||||
func labelReplace(input, ns string) (string, error) {
|
||||
root, err := parser.ParseExpr(input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = injectproxy.NewEnforcer(false, &labels.Matcher{
|
||||
Type: labels.MatchEqual,
|
||||
Name: "namespace",
|
||||
Value: ns,
|
||||
}).EnforceNode(root)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return root.String(), nil
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package expressions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
func TestLabelReplace(t *testing.T) {
|
||||
tests := []struct {
|
||||
expr string
|
||||
expected string
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
expr: "up",
|
||||
expected: `up{namespace="default"}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
expr: `up{namespace="random"}`,
|
||||
expected: `up{namespace="default"}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
expr: `up{namespace="random"} + up{job="test"}`,
|
||||
expected: `up{namespace="default"} + up{job="test",namespace="default"}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
expr: `@@@@`,
|
||||
expectedErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
result, err := labelReplace(tt.expr, "default")
|
||||
if err != nil {
|
||||
if !tt.expectedErr {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(result, tt.expected); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", tt.expected, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package expressions
|
||||
|
||||
// labelReplaceFn rewrites a metric expression so that it is scoped to the
// given namespace, returning the rewritten expression.
type labelReplaceFn func(expr, ns string) (string, error)

// ReplaceNamespaceFns maps a monitoring backend name (e.g. "prometheus") to
// its namespace-enforcement function. Backends add themselves via register
// during init().
var ReplaceNamespaceFns = make(map[string]labelReplaceFn)

// register records fn as the namespace-enforcement implementation for the
// named backend.
func register(name string, fn labelReplaceFn) {
	ReplaceNamespaceFns[name] = fn
}
|
||||
@@ -1,587 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/application/api/v1beta1"
|
||||
appv1beta1 "sigs.k8s.io/application/api/v1beta1"
|
||||
|
||||
"kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/monitoring/expressions"
|
||||
"kubesphere.io/kubesphere/pkg/models/openpitrix"
|
||||
resourcev1alpha3 "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
meteringclient "kubesphere.io/kubesphere/pkg/simple/client/metering"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
// MonitoringOperator exposes metric, meter, and platform-statistic queries,
// backed by Prometheus and (optionally) metrics-server for edge nodes.
type MonitoringOperator interface {
	// GetMetric evaluates a single expression at an instant; when namespace
	// is non-empty the expression is rewritten to that namespace first.
	GetMetric(expr, namespace string, time time.Time) (monitoring.Metric, error)
	// GetMetricOverTime evaluates a single expression over a time range.
	GetMetricOverTime(expr, namespace string, start, end time.Time, step time.Duration) (monitoring.Metric, error)
	// GetNamedMetrics queries predefined named metrics at an instant.
	GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics
	// GetNamedMetricsOverTime queries predefined named metrics over a range.
	GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics
	// GetMetadata lists metric metadata visible in a namespace.
	GetMetadata(namespace string) Metadata
	// GetMetricLabelSet lists label sets observed for metric in [start, end].
	GetMetricLabelSet(metric, namespace string, start, end time.Time) MetricLabelSet

	// TODO: expose KubeSphere self metrics in Prometheus format
	GetKubeSphereStats() Metrics
	GetWorkspaceStats(workspace string) Metrics

	// meter
	GetNamedMetersOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption, priceInfo meteringclient.PriceInfo) (Metrics, error)
	GetNamedMeters(metrics []string, time time.Time, opt monitoring.QueryOption, priceInfo meteringclient.PriceInfo) (Metrics, error)
	GetAppWorkloads(ns string, apps []string) map[string][]string
	GetSerivePodsMap(ns string, services []string) map[string][]string
}
|
||||
|
||||
// monitoringOperator is the default MonitoringOperator implementation.
type monitoringOperator struct {
	prometheus     monitoring.Interface              // primary metrics datasource
	metricsserver  monitoring.Interface              // optional; supplies edge node metrics when non-nil
	k8s            kubernetes.Interface              // core Kubernetes API access
	ks             ksinformers.SharedInformerFactory // listers for KubeSphere CRDs
	op             openpitrix.Interface              // optional app-store client; may be nil
	resourceGetter *resourcev1alpha3.ResourceGetter  // generic CRD/resource lister
}
|
||||
|
||||
func NewMonitoringOperator(monitoringClient monitoring.Interface, metricsClient monitoring.Interface, k8s kubernetes.Interface, factory informers.InformerFactory, resourceGetter *resourcev1alpha3.ResourceGetter, op openpitrix.Interface) MonitoringOperator {
|
||||
return &monitoringOperator{
|
||||
prometheus: monitoringClient,
|
||||
metricsserver: metricsClient,
|
||||
k8s: k8s,
|
||||
ks: factory.KubeSphereSharedInformerFactory(),
|
||||
resourceGetter: resourceGetter,
|
||||
op: op,
|
||||
}
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetMetric(expr, namespace string, time time.Time) (monitoring.Metric, error) {
|
||||
if namespace != "" {
|
||||
// Different monitoring backend implementations have different ways to enforce namespace isolation.
|
||||
// Each implementation should register itself to `ReplaceNamespaceFns` during init().
|
||||
// We hard code "prometheus" here because we only support this datasource so far.
|
||||
// In the future, maybe the value should be returned from a method like `mo.c.GetMonitoringServiceName()`.
|
||||
var err error
|
||||
expr, err = expressions.ReplaceNamespaceFns["prometheus"](expr, namespace)
|
||||
if err != nil {
|
||||
return monitoring.Metric{}, err
|
||||
}
|
||||
}
|
||||
return mo.prometheus.GetMetric(expr, time), nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetMetricOverTime(expr, namespace string, start, end time.Time, step time.Duration) (monitoring.Metric, error) {
|
||||
if namespace != "" {
|
||||
// Different monitoring backend implementations have different ways to enforce namespace isolation.
|
||||
// Each implementation should register itself to `ReplaceNamespaceFns` during init().
|
||||
// We hard code "prometheus" here because we only support this datasource so far.
|
||||
// In the future, maybe the value should be returned from a method like `mo.c.GetMonitoringServiceName()`.
|
||||
var err error
|
||||
expr, err = expressions.ReplaceNamespaceFns["prometheus"](expr, namespace)
|
||||
if err != nil {
|
||||
return monitoring.Metric{}, err
|
||||
}
|
||||
}
|
||||
return mo.prometheus.GetMetricOverTime(expr, start, end, step), nil
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics {
|
||||
ress := mo.prometheus.GetNamedMetrics(metrics, time, opt)
|
||||
|
||||
opts := &monitoring.QueryOptions{}
|
||||
opt.Apply(opts)
|
||||
|
||||
var isNodeRankingQuery bool
|
||||
if opts.QueryType == "rank" {
|
||||
isNodeRankingQuery = true
|
||||
}
|
||||
|
||||
if mo.metricsserver != nil {
|
||||
//Merge edge node metrics data
|
||||
edgeMetrics := make(map[string]monitoring.MetricData)
|
||||
|
||||
for i, ressMetric := range ress {
|
||||
metricName := ressMetric.MetricName
|
||||
ressMetricValues := ressMetric.MetricData.MetricValues
|
||||
if len(ressMetricValues) == 0 || isNodeRankingQuery {
|
||||
// this metric has no prometheus metrics data or the request need to list all nodes metrics
|
||||
if len(edgeMetrics) == 0 {
|
||||
// start to request monintoring metricsApi data
|
||||
mr := mo.metricsserver.GetNamedMetrics(metrics, time, opt)
|
||||
for _, mrMetric := range mr {
|
||||
edgeMetrics[mrMetric.MetricName] = mrMetric.MetricData
|
||||
}
|
||||
}
|
||||
if val, ok := edgeMetrics[metricName]; ok {
|
||||
ress[i].MetricData.MetricValues = append(ress[i].MetricData.MetricValues, val.MetricValues...)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return Metrics{Results: ress}
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics {
|
||||
ress := mo.prometheus.GetNamedMetricsOverTime(metrics, start, end, step, opt)
|
||||
|
||||
if mo.metricsserver != nil {
|
||||
|
||||
//Merge edge node metrics data
|
||||
edgeMetrics := make(map[string]monitoring.MetricData)
|
||||
|
||||
for i, ressMetric := range ress {
|
||||
metricName := ressMetric.MetricName
|
||||
ressMetricValues := ressMetric.MetricData.MetricValues
|
||||
if len(ressMetricValues) == 0 {
|
||||
// this metric has no prometheus metrics data
|
||||
if len(edgeMetrics) == 0 {
|
||||
// start to request monintoring metricsApi data
|
||||
mr := mo.metricsserver.GetNamedMetricsOverTime(metrics, start, end, step, opt)
|
||||
for _, mrMetric := range mr {
|
||||
edgeMetrics[mrMetric.MetricName] = mrMetric.MetricData
|
||||
}
|
||||
}
|
||||
if val, ok := edgeMetrics[metricName]; ok {
|
||||
ress[i].MetricData.MetricValues = append(ress[i].MetricData.MetricValues, val.MetricValues...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Metrics{Results: ress}
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetMetadata(namespace string) Metadata {
|
||||
data := mo.prometheus.GetMetadata(namespace)
|
||||
return Metadata{Data: data}
|
||||
}
|
||||
|
||||
func (mo monitoringOperator) GetMetricLabelSet(metric, namespace string, start, end time.Time) MetricLabelSet {
|
||||
var expr = metric
|
||||
var err error
|
||||
if namespace != "" {
|
||||
// Different monitoring backend implementations have different ways to enforce namespace isolation.
|
||||
// Each implementation should register itself to `ReplaceNamespaceFns` during init().
|
||||
// We hard code "prometheus" here because we only support this datasource so far.
|
||||
// In the future, maybe the value should be returned from a method like `mo.c.GetMonitoringServiceName()`.
|
||||
expr, err = expressions.ReplaceNamespaceFns["prometheus"](metric, namespace)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return MetricLabelSet{}
|
||||
}
|
||||
}
|
||||
data := mo.prometheus.GetMetricLabelSet(expr, start, end)
|
||||
return MetricLabelSet{Data: data}
|
||||
}
|
||||
|
||||
// GetKubeSphereStats reports platform-wide counts as instant vector metrics:
// clusters, workspaces, users, and (when OpenPitrix is enabled) built-in app
// templates. A lister failure is reported on that metric's Error field
// rather than aborting the whole result.
func (mo monitoringOperator) GetKubeSphereStats() Metrics {
	var res Metrics
	now := float64(time.Now().Unix())

	clusterList, err := mo.ks.Cluster().V1alpha1().Clusters().Lister().List(labels.Everything())
	clusterTotal := len(clusterList)
	// An empty cluster list still implies the current cluster exists, so
	// report at least one.
	if clusterTotal == 0 {
		clusterTotal = 1
	}
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereClusterCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereClusterCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(clusterTotal)},
					},
				},
			},
		})
	}

	// Workspace count, from WorkspaceTemplate CRs.
	wkList, err := mo.ks.Tenant().V1alpha2().WorkspaceTemplates().Lister().List(labels.Everything())
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereWorkspaceCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereWorkspaceCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(wkList))},
					},
				},
			},
		})
	}

	// User count, from User CRs.
	usrList, err := mo.ks.Iam().V1alpha2().Users().Lister().List(labels.Everything())
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereUserCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: KubeSphereUserCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(usrList))},
					},
				},
			},
		})
	}

	// App template count: only active apps from the built-in repo.
	cond := &params.Conditions{
		Match: map[string]string{
			openpitrix.Status: openpitrix.StatusActive,
			openpitrix.RepoId: openpitrix.BuiltinRepoId,
		},
	}
	// The OpenPitrix client is optional; skip the metric entirely when absent.
	if mo.op != nil {
		tmpl, err := mo.op.ListApps(cond, "", false, 0, 0)
		if err != nil {
			res.Results = append(res.Results, monitoring.Metric{
				MetricName: KubeSphereAppTmplCount,
				Error:      err.Error(),
			})
		} else {
			res.Results = append(res.Results, monitoring.Metric{
				MetricName: KubeSphereAppTmplCount,
				MetricData: monitoring.MetricData{
					MetricType: monitoring.MetricTypeVector,
					MetricValues: []monitoring.MetricValue{
						{
							Sample: &monitoring.Point{now, float64(tmpl.TotalCount)},
						},
					},
				},
			})
		}
	}

	return res
}
|
||||
|
||||
// GetWorkspaceStats reports per-workspace counts as instant vector metrics:
// namespaces, DevOps projects, members, and workspace roles. A lister
// failure is reported on that metric's Error field rather than aborting the
// whole result.
func (mo monitoringOperator) GetWorkspaceStats(workspace string) Metrics {
	var res Metrics
	now := float64(time.Now().Unix())

	// All workspace-owned objects carry the workspace label.
	selector := labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: workspace})
	opt := metav1.ListOptions{LabelSelector: selector.String()}

	// Namespace count.
	nsList, err := mo.k8s.CoreV1().Namespaces().List(context.Background(), opt)
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceNamespaceCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceNamespaceCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(nsList.Items))},
					},
				},
			},
		})
	}

	// DevOps project count.
	devopsList, err := mo.ks.Devops().V1alpha3().DevOpsProjects().Lister().List(selector)
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceDevopsCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceDevopsCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(devopsList))},
					},
				},
			},
		})
	}

	// Member count: workspace role bindings that reference a user.
	r, _ := labels.NewRequirement(v1alpha2.UserReferenceLabel, selection.Exists, nil)
	memberSelector := selector.DeepCopySelector().Add(*r)
	memberList, err := mo.ks.Iam().V1alpha2().WorkspaceRoleBindings().Lister().List(memberSelector)
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceMemberCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceMemberCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(memberList))},
					},
				},
			},
		})
	}

	// Workspace role count.
	roleList, err := mo.ks.Iam().V1alpha2().WorkspaceRoles().Lister().List(selector)
	if err != nil {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceRoleCount,
			Error:      err.Error(),
		})
	} else {
		res.Results = append(res.Results, monitoring.Metric{
			MetricName: WorkspaceRoleCount,
			MetricData: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Sample: &monitoring.Point{now, float64(len(roleList))},
					},
				},
			},
		})
	}

	return res
}
|
||||
|
||||
/*
|
||||
meter related methods
|
||||
*/
|
||||
|
||||
func (mo monitoringOperator) getNamedMetersWithHourInterval(meters []string, t time.Time, opt monitoring.QueryOption) Metrics {
|
||||
|
||||
var opts []monitoring.QueryOption
|
||||
|
||||
opts = append(opts, opt)
|
||||
opts = append(opts, monitoring.MeterOption{
|
||||
Step: 1 * time.Hour,
|
||||
})
|
||||
|
||||
ress := mo.prometheus.GetNamedMeters(meters, t, opts)
|
||||
|
||||
return Metrics{Results: ress}
|
||||
}
|
||||
|
||||
func generateScalingFactorMap(step time.Duration) map[string]float64 {
|
||||
scalingMap := make(map[string]float64)
|
||||
|
||||
for k := range MeterResourceMap {
|
||||
scalingMap[k] = step.Hours()
|
||||
}
|
||||
return scalingMap
|
||||
}
|
||||
|
||||
// GetNamedMetersOverTime queries meter data over (start, end] and annotates
// it with fee information derived from priceInfo. step must be a whole
// number of hours (minimum one); for windows longer than 30 days it must be
// at least 24 hours. The underlying query always runs at a one-hour step;
// values are scaled up to the requested step via generateScalingFactorMap.
func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption, priceInfo meteringclient.PriceInfo) (metrics Metrics, err error) {

	// Sub-hour steps are silently widened to one hour (with a warning).
	if step.Hours() < 1 {
		klog.Warning("step should be longer than one hour")
		step = 1 * time.Hour
	}
	// Guard against huge result sets for long windows.
	if end.Sub(start).Hours() > 30*24 {
		if step.Hours() < 24 {
			err = errors.New("step should be larger than 24 hours")
			return
		}
	}
	if math.Mod(step.Hours(), 1.0) > 0 {
		err = errors.New("step should be integer hours")
		return
	}

	// query time range: (start, end], so here we need to exclude start itself.
	if start.Add(time.Hour).After(end) {
		start = end
	} else {
		start = start.Add(time.Hour)
	}

	var opts []monitoring.QueryOption
	opts = append(opts, opt)
	opts = append(opts, monitoring.MeterOption{
		Start: start,
		End:   end,
		Step:  time.Hour,
	})

	// Always query hourly, then scale per-resource values by step hours.
	ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, time.Hour, opts)
	sMap := generateScalingFactorMap(step)

	for i := range ress {
		ress[i].MetricData = updateMetricStatData(ress[i], sMap, priceInfo)
	}

	return Metrics{Results: ress}, nil
}
|
||||
|
||||
func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt monitoring.QueryOption, priceInfo meteringclient.PriceInfo) (Metrics, error) {
|
||||
|
||||
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
|
||||
|
||||
for metricIndex := range metersPerHour.Results {
|
||||
|
||||
res := metersPerHour.Results[metricIndex]
|
||||
|
||||
metersPerHour.Results[metricIndex].MetricData = updateMetricStatData(res, nil, priceInfo)
|
||||
}
|
||||
|
||||
return metersPerHour, nil
|
||||
}
|
||||
|
||||
// GetAppWorkloads maps each matching application in ns (keyed by full name
// "<app>" or "<app>:<version>") to its component workloads, each rendered
// as "<Kind>:<name>". apps filters by name and optional version; list
// errors are logged and yield nil.
func (mo monitoringOperator) GetAppWorkloads(ns string, apps []string) map[string][]string {

	componentsMap := make(map[string][]string)
	applicationList := []*appv1beta1.Application{}

	result, err := mo.resourceGetter.List("applications", ns, query.New())
	if err != nil {
		klog.Error(err)
		return nil
	}

	// Keep only items that are actually Application objects.
	for _, obj := range result.Items {
		app, ok := obj.(*appv1beta1.Application)
		if !ok {
			continue
		}

		applicationList = append(applicationList, app)
	}

	// getAppFullName renders "<name>" or "<name>:<version>" from labels.
	getAppFullName := func(appObject *v1beta1.Application) (name string) {
		name = appObject.Labels[constants.ApplicationName]
		if appObject.Labels[constants.ApplicationVersion] != "" {
			name += fmt.Sprintf(":%v", appObject.Labels[constants.ApplicationVersion])
		}
		return
	}

	// appFilter reports whether appObject matches the requested apps filter.
	// NOTE(review): the unconditional `return true` at the end of the loop
	// body means only the first element of apps is ever examined (which is
	// why staticcheck is suppressed below) — confirm whether matching
	// against every entry of apps was intended.
	appFilter := func(appObject *v1beta1.Application) bool {

		for _, app := range apps {
			// Each filter entry is "<name>" or "<name>:<version>".
			var applicationName, applicationVersion string
			tmp := strings.Split(app, ":")

			if len(tmp) >= 1 {
				applicationName = tmp[0]
			}
			if len(tmp) == 2 {
				applicationVersion = tmp[1]
			}

			if applicationName != "" && appObject.Labels[constants.ApplicationName] != applicationName {
				return false
			}
			if applicationVersion != "" && appObject.Labels[constants.ApplicationVersion] != applicationVersion {
				return false
			}
			//nolint:staticcheck
			return true
		}

		// An empty filter matches every application.
		return true
	}

	// Collect "<Kind>:<name>" entries for every component of each match.
	for _, appObj := range applicationList {
		if appFilter(appObj) {
			for _, com := range appObj.Status.ComponentList.Objects {
				//nolint:staticcheck // TODO Use golang.org/x/text/cases instead.
				kind := strings.Title(com.Kind)
				name := com.Name
				componentsMap[getAppFullName((appObj))] = append(componentsMap[getAppFullName(appObj)], kind+":"+name)
			}
		}
	}

	return componentsMap
}
|
||||
|
||||
func (mo monitoringOperator) GetSerivePodsMap(ns string, services []string) map[string][]string {
|
||||
var svcPodsMap = make(map[string][]string)
|
||||
|
||||
for _, svc := range services {
|
||||
svcObj, err := mo.k8s.CoreV1().Services(ns).Get(context.Background(), svc, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
svcSelector := svcObj.Spec.Selector
|
||||
if len(svcSelector) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
svcLabels := labels.Set{}
|
||||
for key, value := range svcSelector {
|
||||
svcLabels[key] = value
|
||||
}
|
||||
|
||||
selector := labels.SelectorFromSet(svcLabels)
|
||||
opt := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
|
||||
podList, err := mo.k8s.CoreV1().Pods(ns).List(context.Background(), opt)
|
||||
if err != nil {
|
||||
klog.Error(err.Error())
|
||||
return svcPodsMap
|
||||
}
|
||||
|
||||
for _, pod := range podList.Items {
|
||||
svcPodsMap[svc] = append(svcPodsMap[svc], pod.Name)
|
||||
}
|
||||
|
||||
}
|
||||
return svcPodsMap
|
||||
}
|
||||
@@ -1,338 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package monitoring
|
||||
|
||||
const (
	// Platform-level statistics exposed by the monitoring API.
	KubeSphereWorkspaceCount = "kubesphere_workspace_count"
	KubeSphereUserCount      = "kubesphere_user_count"
	// NOTE(review): "cluser" looks like a typo for "cluster", but the string
	// is the wire-level metric name — presumably kept for compatibility with
	// existing dashboards/queries; confirm before changing.
	KubeSphereClusterCount = "kubesphere_cluser_count"
	KubeSphereAppTmplCount = "kubesphere_app_template_count"

	// Per-workspace statistics.
	WorkspaceNamespaceCount = "workspace_namespace_count"
	WorkspaceDevopsCount    = "workspace_devops_project_count"
	WorkspaceMemberCount    = "workspace_member_count"
	WorkspaceRoleCount      = "workspace_role_count"

	// MetricMeterPrefix marks metering (billing) metric names.
	MetricMeterPrefix = "meter_"
)
|
||||
|
||||
// The lists below enumerate the metric names recognized at each resource
// level. Entries beginning with "meter_" (see MetricMeterPrefix) are
// metering/billing metrics.

// ClusterMetrics are the metric names supported at cluster scope.
var ClusterMetrics = []string{
	"cluster_cpu_utilisation", "cluster_cpu_usage", "cluster_cpu_total",
	"cluster_memory_utilisation", "cluster_memory_available", "cluster_memory_total", "cluster_memory_usage_wo_cache",
	"cluster_net_utilisation", "cluster_net_bytes_transmitted", "cluster_net_bytes_received",
	"cluster_disk_read_iops", "cluster_disk_write_iops", "cluster_disk_read_throughput", "cluster_disk_write_throughput",
	"cluster_disk_size_usage", "cluster_disk_size_utilisation", "cluster_disk_size_capacity", "cluster_disk_size_available",
	"cluster_disk_inode_total", "cluster_disk_inode_usage", "cluster_disk_inode_utilisation",
	"cluster_namespace_count", "cluster_pod_count", "cluster_pod_quota", "cluster_pod_utilisation",
	"cluster_pod_running_count", "cluster_pod_succeeded_count", "cluster_pod_abnormal_count",
	"cluster_node_online", "cluster_node_offline", "cluster_node_total",
	"cluster_cronjob_count", "cluster_pvc_count", "cluster_daemonset_count", "cluster_deployment_count",
	"cluster_endpoint_count", "cluster_hpa_count", "cluster_job_count", "cluster_statefulset_count",
	"cluster_replicaset_count", "cluster_service_count", "cluster_secret_count", "cluster_pv_count",
	"cluster_ingresses_extensions_count",
	"cluster_load1", "cluster_load5", "cluster_load15",
	"cluster_pod_abnormal_ratio", "cluster_node_offline_ratio",

	// meter
	"meter_cluster_cpu_usage", "meter_cluster_memory_usage",
	"meter_cluster_net_bytes_transmitted", "meter_cluster_net_bytes_received",
	"meter_cluster_pvc_bytes_total",
}

// NodeMetrics are the metric names supported at node scope.
var NodeMetrics = []string{
	"node_cpu_utilisation", "node_cpu_total", "node_cpu_usage",
	"node_memory_utilisation", "node_memory_usage_wo_cache", "node_memory_available", "node_memory_total",
	"node_net_utilisation", "node_net_bytes_transmitted", "node_net_bytes_received",
	"node_disk_read_iops", "node_disk_write_iops", "node_disk_read_throughput", "node_disk_write_throughput",
	"node_disk_size_capacity", "node_disk_size_available", "node_disk_size_usage", "node_disk_size_utilisation",
	"node_disk_inode_total", "node_disk_inode_usage", "node_disk_inode_utilisation",
	"node_pod_count", "node_pod_quota", "node_pod_utilisation",
	"node_pod_running_count", "node_pod_succeeded_count", "node_pod_abnormal_count",
	"node_load1", "node_load5", "node_load15",
	"node_pod_abnormal_ratio", "node_pleg_quantile",

	"node_device_size_usage", "node_device_size_utilisation",

	// meter
	"meter_node_cpu_usage", "meter_node_memory_usage_wo_cache",
	"meter_node_net_bytes_transmitted", "meter_node_net_bytes_received",
	"meter_node_pvc_bytes_total",
}

// WorkspaceMetrics are the metric names supported at workspace scope.
var WorkspaceMetrics = []string{
	"workspace_cpu_usage", "workspace_memory_usage", "workspace_memory_usage_wo_cache",
	"workspace_net_bytes_transmitted", "workspace_net_bytes_received",
	"workspace_pod_count", "workspace_pod_running_count", "workspace_pod_succeeded_count", "workspace_pod_abnormal_count",
	"workspace_ingresses_extensions_count", "workspace_cronjob_count", "workspace_pvc_count",
	"workspace_daemonset_count", "workspace_deployment_count", "workspace_endpoint_count",
	"workspace_hpa_count", "workspace_job_count", "workspace_statefulset_count",
	"workspace_replicaset_count", "workspace_service_count", "workspace_secret_count",
	"workspace_pod_abnormal_ratio",

	// meter
	"meter_workspace_cpu_usage", "meter_workspace_memory_usage",
	"meter_workspace_net_bytes_transmitted", "meter_workspace_net_bytes_received",
	"meter_workspace_pvc_bytes_total",
}

// NamespaceMetrics are the metric names supported at namespace scope.
var NamespaceMetrics = []string{
	"namespace_cpu_usage", "namespace_memory_usage", "namespace_memory_usage_wo_cache",
	"namespace_net_bytes_transmitted", "namespace_net_bytes_received",
	"namespace_pod_count", "namespace_pod_running_count", "namespace_pod_succeeded_count",
	"namespace_pod_abnormal_count", "namespace_pod_abnormal_ratio",
	"namespace_memory_limit_hard", "namespace_cpu_limit_hard", "namespace_pod_count_hard",
	"namespace_cronjob_count", "namespace_pvc_count", "namespace_daemonset_count",
	"namespace_deployment_count", "namespace_endpoint_count", "namespace_hpa_count",
	"namespace_job_count", "namespace_statefulset_count", "namespace_replicaset_count",
	"namespace_service_count", "namespace_secret_count", "namespace_configmap_count",
	"namespace_ingresses_extensions_count", "namespace_s2ibuilder_count",

	// meter
	"meter_namespace_cpu_usage", "meter_namespace_memory_usage_wo_cache",
	"meter_namespace_net_bytes_transmitted", "meter_namespace_net_bytes_received",
	"meter_namespace_pvc_bytes_total",
}

// ApplicationMetrics are the metric names supported at application scope
// (metering only).
var ApplicationMetrics = []string{

	// meter
	"meter_application_cpu_usage", "meter_application_memory_usage_wo_cache",
	"meter_application_net_bytes_transmitted", "meter_application_net_bytes_received",
	"meter_application_pvc_bytes_total",
}

// WorkloadMetrics are the metric names supported at workload scope.
var WorkloadMetrics = []string{
	"workload_cpu_usage", "workload_memory_usage", "workload_memory_usage_wo_cache",
	"workload_net_bytes_transmitted", "workload_net_bytes_received",
	"workload_deployment_replica", "workload_deployment_replica_available",
	"workload_statefulset_replica", "workload_statefulset_replica_available",
	"workload_daemonset_replica", "workload_daemonset_replica_available",
	"workload_deployment_unavailable_replicas_ratio",
	"workload_daemonset_unavailable_replicas_ratio",
	"workload_statefulset_unavailable_replicas_ratio",

	// meter
	"meter_workload_cpu_usage", "meter_workload_memory_usage_wo_cache",
	"meter_workload_net_bytes_transmitted", "meter_workload_net_bytes_received",
	"meter_workload_pvc_bytes_total",
}

// ServiceMetrics are the metric names supported at service scope
// (metering only).
var ServiceMetrics = []string{
	// meter
	"meter_service_cpu_usage", "meter_service_memory_usage_wo_cache",
	"meter_service_net_bytes_transmitted", "meter_service_net_bytes_received",
}

// PodMetrics are the metric names supported at pod scope.
var PodMetrics = []string{
	"pod_cpu_usage", "pod_memory_usage", "pod_memory_usage_wo_cache",
	"pod_net_bytes_transmitted", "pod_net_bytes_received",

	// meter
	"meter_pod_cpu_usage", "meter_pod_memory_usage_wo_cache",
	"meter_pod_net_bytes_transmitted", "meter_pod_net_bytes_received",
	"meter_pod_pvc_bytes_total",
}

// ContainerMetrics are the metric names supported at container scope.
var ContainerMetrics = []string{
	"container_cpu_usage", "container_memory_usage", "container_memory_usage_wo_cache",
	"container_processes_usage", "container_threads_usage",
}

// PVCMetrics are the metric names supported for persistent volume claims.
var PVCMetrics = []string{
	"pvc_inodes_available", "pvc_inodes_used", "pvc_inodes_total", "pvc_inodes_utilisation",
	"pvc_bytes_available", "pvc_bytes_used", "pvc_bytes_total", "pvc_bytes_utilisation",
}

// IngressMetrics are the metric names supported for ingresses.
var IngressMetrics = []string{
	"ingress_request_count", "ingress_request_5xx_count", "ingress_request_4xx_count",
	"ingress_active_connections", "ingress_success_rate",
	"ingress_request_duration_average", "ingress_request_duration_50percentage",
	"ingress_request_duration_95percentage", "ingress_request_duration_99percentage",
	"ingress_request_volume", "ingress_request_volume_by_ingress",
	"ingress_request_network_sent", "ingress_request_network_received",
	"ingress_request_memory_bytes", "ingress_request_cpu_usage",
}

// EtcdMetrics are the metric names supported for the etcd component.
var EtcdMetrics = []string{
	"etcd_server_list", "etcd_server_total", "etcd_server_up_total",
	"etcd_server_has_leader", "etcd_server_is_leader", "etcd_server_leader_changes",
	"etcd_server_proposals_failed_rate", "etcd_server_proposals_applied_rate",
	"etcd_server_proposals_committed_rate", "etcd_server_proposals_pending_count",
	"etcd_mvcc_db_size",
	"etcd_network_client_grpc_received_bytes", "etcd_network_client_grpc_sent_bytes",
	"etcd_grpc_call_rate", "etcd_grpc_call_failed_rate",
	"etcd_grpc_server_msg_received_rate", "etcd_grpc_server_msg_sent_rate",
	"etcd_disk_wal_fsync_duration", "etcd_disk_wal_fsync_duration_quantile",
	"etcd_disk_backend_commit_duration", "etcd_disk_backend_commit_duration_quantile",
}

// APIServerMetrics are the metric names supported for the kube-apiserver.
var APIServerMetrics = []string{
	"apiserver_up_sum", "apiserver_request_rate", "apiserver_request_by_verb_rate",
	"apiserver_request_latencies", "apiserver_request_by_verb_latencies",
}

// SchedulerMetrics are the metric names supported for the kube-scheduler.
var SchedulerMetrics = []string{
	"scheduler_up_sum", "scheduler_schedule_attempts", "scheduler_schedule_attempt_rate",
	"scheduler_e2e_scheduling_latency", "scheduler_e2e_scheduling_latency_quantile",
}
|
||||
@@ -1,205 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
const (
	// Identifier* name the metadata label that uniquely identifies a
	// resource inside metric results of the corresponding kind; Sort uses
	// it to align values across metrics.
	IdentifierNode        = "node"
	IdentifierWorkspace   = "workspace"
	IdentifierNamespace   = "namespace"
	IdentifierWorkload    = "workload"
	IdentifierPod         = "pod"
	IdentifierContainer   = "container"
	IdentifierPVC         = "persistentvolumeclaim"
	IdentifierService     = "service"
	IdentifierApplication = "application"
	IdentifierIngress     = "ingress"

	// Sort directions accepted by Metrics.Sort; anything other than
	// OrderAscending is treated as descending.
	OrderAscending  = "asc"
	OrderDescending = "desc"
)
|
||||
|
||||
type wrapper struct {
|
||||
monitoring.MetricData
|
||||
identifier, order string
|
||||
}
|
||||
|
||||
func (w wrapper) Len() int {
|
||||
return len(w.MetricValues)
|
||||
}
|
||||
|
||||
func (w wrapper) Less(i, j int) bool {
|
||||
p := w.MetricValues[i]
|
||||
q := w.MetricValues[j]
|
||||
|
||||
// Place Nil to the tail.
|
||||
if p.Sample == nil && q.Sample != nil {
|
||||
return false
|
||||
}
|
||||
if p.Sample != nil && q.Sample == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// If both Samples are Nil or have the same metric value, sort by resource name
|
||||
if p.Sample == q.Sample || p.Sample[1] == q.Sample[1] {
|
||||
return p.Metadata[w.identifier] < q.Metadata[w.identifier]
|
||||
}
|
||||
// Place NaN to the tail (NaN takes precedence over Nil).
|
||||
if math.IsNaN(p.Sample[1]) != math.IsNaN(q.Sample[1]) {
|
||||
return !math.IsNaN(p.Sample[1])
|
||||
}
|
||||
|
||||
switch w.order {
|
||||
case OrderAscending:
|
||||
return p.Sample.Value() < q.Sample.Value()
|
||||
default:
|
||||
return p.Sample.Value() > q.Sample.Value()
|
||||
}
|
||||
}
|
||||
|
||||
func (id wrapper) Swap(i, j int) {
|
||||
id.MetricValues[i], id.MetricValues[j] = id.MetricValues[j], id.MetricValues[i]
|
||||
}
|
||||
|
||||
// SortMetrics sorts a group of resources by a given metric. Range query doesn't support ranking.
|
||||
// Example:
|
||||
//
|
||||
// Before sorting:
|
||||
// | ID | Metric 1 | Metric 2 | Metric 3 |
|
||||
// | a | 1 | XL | |
|
||||
// | b | 1 | S | |
|
||||
// | c | 3 | M | |
|
||||
//
|
||||
// After sorting: target=metric_2, order=asc, identifier=id
|
||||
// | ID | Metric 1 | Metric 2 (asc) | Metric 3 |
|
||||
// | a | 1 | XL | |
|
||||
// | c | 3 | M | |
|
||||
// | b | 1 | S | |
|
||||
func (raw *Metrics) Sort(target, order, identifier string) *Metrics {
	// Nothing to rank without a target metric, an identifier label, or results.
	if target == "" || identifier == "" || len(raw.Results) == 0 {
		return raw
	}

	resourceSet := make(map[string]bool)    // resource set records possible values of the identifier
	resourceOrdinal := make(map[string]int) // resource-ordinal map

	ordinal := 0
	for _, item := range raw.Results {
		// Only instant-vector results can be ranked; skip errored results too.
		if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
			continue
		}

		if item.MetricName == target {
			// Rank the target metric's values; this ordering defines the
			// ordinals every other metric is rearranged to follow.
			sort.Sort(wrapper{
				MetricData: item.MetricData,
				identifier: identifier,
				order:      order,
			})

			for _, mv := range item.MetricValues {
				// Record ordinals in the final result
				v, ok := mv.Metadata[identifier]
				if ok && v != "" {
					if _, ok := resourceOrdinal[v]; !ok {
						resourceOrdinal[v] = ordinal
						ordinal++
					}
				}
			}
		}

		// Add every unique identifier value to the set
		for _, mv := range item.MetricValues {
			v, ok := mv.Metadata[identifier]
			if ok && v != "" {
				resourceSet[v] = true
			}
		}
	}

	// Deterministic (alphabetical) order for resources that did not appear
	// in the target metric.
	var resourceList []string
	for k := range resourceSet {
		resourceList = append(resourceList, k)
	}
	sort.Strings(resourceList)

	// Fill resource-ordinal map with resources never present in the target, and give them ordinals.
	for _, r := range resourceList {
		if _, ok := resourceOrdinal[r]; !ok {
			resourceOrdinal[r] = ordinal
			ordinal++
		}
	}

	// Sort metrics
	for i, item := range raw.Results {
		if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
			continue
		}

		// Rebuild each metric's values in ordinal order. Resources absent
		// from a particular metric leave zero-valued placeholders at their
		// slots, so every result ends up the same length.
		sorted := make([]monitoring.MetricValue, len(resourceList))
		for _, mv := range item.MetricValues {
			v, ok := mv.Metadata[identifier]
			if ok && v != "" {
				ordinal := resourceOrdinal[v]
				sorted[ordinal] = mv
			}
		}
		raw.Results[i].MetricValues = sorted
	}

	// Ranking collapses pagination: the ranked set is one page; callers
	// re-page via Page.
	raw.CurrentPage = 1
	raw.TotalPages = 1
	raw.TotalItems = len(resourceList)
	return raw
}
|
||||
|
||||
func (raw *Metrics) Page(page, limit int) *Metrics {
|
||||
if page < 1 || limit < 1 || len(raw.Results) == 0 {
|
||||
return raw
|
||||
}
|
||||
|
||||
start := (page - 1) * limit
|
||||
end := page * limit
|
||||
|
||||
for i, item := range raw.Results {
|
||||
if item.MetricType != monitoring.MetricTypeVector || item.Error != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
total := len(item.MetricValues)
|
||||
if start >= total {
|
||||
raw.Results[i].MetricValues = nil
|
||||
continue
|
||||
}
|
||||
if end >= total {
|
||||
end = total
|
||||
}
|
||||
|
||||
raw.Results[i].MetricValues = item.MetricValues[start:end]
|
||||
}
|
||||
|
||||
raw.CurrentPage = page
|
||||
raw.TotalPages = int(math.Ceil(float64(raw.TotalItems) / float64(limit)))
|
||||
return raw
|
||||
}
|
||||
@@ -1,182 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// TestSort verifies Metrics.Sort against golden testdata fixtures, covering
// ascending/descending order, faulty inputs (errored metrics, missing
// identifiers, NaN/Inf samples), blank and null result sets.
func TestSort(t *testing.T) {
	tests := []struct {
		target     string // metric that drives the ranking
		order      string // "asc" or "desc"
		identifier string // metadata label identifying each resource
		raw        string // input fixture under ./testdata
		expected   string // golden output fixture under ./testdata
	}{
		{
			target:     "node_cpu_utilisation",
			order:      "asc",
			identifier: "node",
			raw:        "source-node-metrics.json",
			expected:   "sorted-node-metrics-asc.json",
		},
		{
			target:     "node_memory_utilisation",
			order:      "desc",
			identifier: "node",
			raw:        "source-node-metrics.json",
			expected:   "sorted-node-metrics-desc.json",
		},
		{
			// Results containing an empty identifier value.
			target:     "node_memory_utilisation",
			order:      "desc",
			identifier: "node",
			raw:        "faulty-node-metrics-1.json",
			expected:   "faulty-node-metrics-sorted-1.json",
		},
		{
			// Target metric carries an error and must be skipped.
			target:     "node_cpu_utilisation",
			order:      "asc",
			identifier: "node",
			raw:        "faulty-node-metrics-2.json",
			expected:   "faulty-node-metrics-sorted-2.json",
		},
		{
			// Samples containing NaN/Inf values.
			target:     "node_cpu_utilisation",
			order:      "asc",
			identifier: "node",
			raw:        "faulty-node-metrics-3.json",
			expected:   "faulty-node-metrics-sorted-3.json",
		},
		{
			target:     "node_memory_utilisation",
			order:      "desc",
			identifier: "node",
			raw:        "blank-node-metrics.json",
			expected:   "blank-node-metrics-sorted.json",
		},
		{
			target:     "node_memory_utilisation",
			order:      "desc",
			identifier: "node",
			raw:        "null-node-metrics.json",
			expected:   "null-node-metrics-sorted.json",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			source, expected, err := jsonFromFile(tt.raw, tt.expected)
			if err != nil {
				t.Fatal(err)
			}

			result := source.Sort(tt.target, tt.order, tt.identifier)
			// Custom comparer: treat NaN == NaN so fixtures with NaN
			// samples compare equal (NaN != NaN under IEEE 754).
			opt := cmp.Comparer(func(x, y float64) bool {
				return (math.IsNaN(x) && math.IsNaN(y)) || x == y
			})
			if diff := cmp.Diff(*result, *expected, opt); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", expected, diff)
			}
		})
	}
}
|
||||
|
||||
// TestPage verifies Metrics.Page against golden testdata fixtures, covering
// an invalid page number (no-op), successive in-range pages, a page past the
// end, and paging of results that include an errored metric.
func TestPage(t *testing.T) {
	tests := []struct {
		page     int
		limit    int
		raw      string // input fixture under ./testdata
		expected string // golden output fixture under ./testdata
	}{
		{
			// page < 1 is invalid: Page must return the input unchanged.
			page:     0,
			limit:    5,
			raw:      "sorted-node-metrics-asc.json",
			expected: "sorted-node-metrics-asc.json",
		},
		{
			page:     1,
			limit:    5,
			raw:      "sorted-node-metrics-asc.json",
			expected: "paged-node-metrics-1.json",
		},
		{
			page:     2,
			limit:    5,
			raw:      "sorted-node-metrics-asc.json",
			expected: "paged-node-metrics-2.json",
		},
		{
			// Page beyond the available values.
			page:     3,
			limit:    5,
			raw:      "sorted-node-metrics-asc.json",
			expected: "paged-node-metrics-3.json",
		},
		{
			// Input containing an errored metric, which paging must skip.
			page:     1,
			limit:    2,
			raw:      "faulty-node-metrics-sorted-1.json",
			expected: "faulty-node-metrics-paged.json",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			source, expected, err := jsonFromFile(tt.raw, tt.expected)
			if err != nil {
				t.Fatal(err)
			}

			result := source.Page(tt.page, tt.limit)
			if diff := cmp.Diff(*result, *expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", expected, diff)
			}
		})
	}
}
|
||||
|
||||
func jsonFromFile(sourceFile, expectedFile string) (*Metrics, *Metrics, error) {
|
||||
sourceJson := &Metrics{}
|
||||
expectedJson := &Metrics{}
|
||||
|
||||
json, err := os.ReadFile(fmt.Sprintf("./testdata/%s", sourceFile))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = jsoniter.Unmarshal(json, sourceJson)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
json, err = os.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = jsoniter.Unmarshal(json, expectedJson)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return sourceJson, expectedJson, nil
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":3
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": ""
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": ""
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_cpu_utilisation",
|
||||
"error": "error"
|
||||
},
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,232 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_cpu_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,244 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_cpu_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"+Inf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"-Inf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"NaN"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"error":"error"
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":2,
|
||||
"total_item":4
|
||||
}
|
||||
@@ -1,99 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"error":"error"
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":4
|
||||
}
|
||||
@@ -1,235 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"-Inf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"+Inf"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"NaN"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{},
|
||||
{},
|
||||
{}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page": 1,
|
||||
"total_page": 1,
|
||||
"total_item": 3
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,166 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.021645833333483702"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.3335848564534758"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.12824588180084573"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":2,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":2,
|
||||
"total_page":2,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector"
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":3,
|
||||
"total_page":2,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.021645833333483702"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.3335848564534758"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.12824588180084573"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
{
|
||||
"results":[
|
||||
{
|
||||
"metric_name":"node_cpu_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.021645833333483702"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_disk_size_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.193,
|
||||
"0.3335848564534758"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name":"node_memory_utilisation",
|
||||
"data":{
|
||||
"resultType":"vector",
|
||||
"result":[
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-o13skypq"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-2dazc1d6"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-xfcxdn7z"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-hgcoippu"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ezjb7gsk"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-ircdnrao"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-9jtsi522"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric":{
|
||||
"node":"i-tl1i71hr"
|
||||
},
|
||||
"value":[
|
||||
1585658599.195,
|
||||
"0.12824588180084573"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"page":1,
|
||||
"total_page":1,
|
||||
"total_item":8
|
||||
}
|
||||
@@ -1,244 +0,0 @@
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"metric_name": "node_cpu_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.18095833333306172"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.03250000000007276"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07443750000044626"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.05066666666655995"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.05210416666595847"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.07756249999996119"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.021645833333483702"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.06745833333334303"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_disk_size_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.42012898861983516"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2601006025131434"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.29849334024542695"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.2588273152865106"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.21351118996831508"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.4329682466178235"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.3335848564534758"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.193,
|
||||
"0.35981263055856705"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metric_name": "node_memory_utilisation",
|
||||
"data": {
|
||||
"resultType": "vector",
|
||||
"result": [
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-2dazc1d6"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.5286875837861773"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-9jtsi522"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.1446648505469157"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ezjb7gsk"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.23637090535053928"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-hgcoippu"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.2497060264216553"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-ircdnrao"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.21291125105270192"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-o13skypq"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.823247832787681"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-tl1i71hr"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.12824588180084573"
|
||||
]
|
||||
},
|
||||
{
|
||||
"metric": {
|
||||
"node": "i-xfcxdn7z"
|
||||
},
|
||||
"value": [
|
||||
1585658599.195,
|
||||
"0.40309723127991315"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
// Metrics is the API response envelope for metric queries: the raw
// query results plus optional pagination metadata.
type Metrics struct {
	Results     []monitoring.Metric `json:"results" description:"actual array of results"`
	CurrentPage int                 `json:"page,omitempty" description:"current page returned"`
	TotalPages  int                 `json:"total_page,omitempty" description:"total number of pages"`
	TotalItems  int                 `json:"total_item,omitempty" description:"page size"`
}

// Metadata is the API response envelope for metric metadata queries.
type Metadata struct {
	Data []monitoring.Metadata `json:"data" description:"actual array of results"`
}

// MetricLabelSet is the API response envelope for label-set queries;
// each entry is one label-name -> label-value mapping.
type MetricLabelSet struct {
	Data []map[string]string `json:"data" description:"actual array of results"`
}
|
||||
@@ -1,268 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
meteringclient "kubesphere.io/kubesphere/pkg/simple/client/metering"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
const (
	// Enumeration of metered resource kinds; used as values in
	// MeterResourceMap and as keys in meterResourceUnitMap.
	METER_RESOURCE_TYPE_CPU = iota
	METER_RESOURCE_TYPE_MEM
	METER_RESOURCE_TYPE_NET_INGRESS
	METER_RESOURCE_TYPE_NET_EGRESS
	METER_RESOURCE_TYPE_PVC

	// meteringDefaultPrecision is the number of decimal places used when
	// formatting min/max/avg/sum statistic values.
	meteringDefaultPrecision = 10
	// meteringFeePrecision is the number of decimal places used when
	// formatting currency fee values.
	meteringFeePrecision = 3
)
|
||||
|
||||
// meterResourceUnitMap maps a metered resource kind to the unit its raw
// samples are expressed in.
var meterResourceUnitMap = map[int]string{
	METER_RESOURCE_TYPE_CPU:         "cores",
	METER_RESOURCE_TYPE_MEM:         "bytes",
	METER_RESOURCE_TYPE_NET_INGRESS: "bytes",
	METER_RESOURCE_TYPE_NET_EGRESS:  "bytes",
	METER_RESOURCE_TYPE_PVC:         "bytes",
}
|
||||
|
||||
// MeterResourceMap maps each known meter metric name to the resource
// kind it measures. Note the slight per-level naming differences
// (e.g. node/namespace use *_memory_usage_wo_cache, workspace uses
// *_memory_usage) and that the service level carries no PVC meter —
// these reflect the metric names actually emitted, so keep them as-is.
var MeterResourceMap = map[string]int{
	"meter_cluster_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_cluster_memory_usage":               METER_RESOURCE_TYPE_MEM,
	"meter_cluster_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_cluster_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_cluster_pvc_bytes_total":            METER_RESOURCE_TYPE_PVC,
	"meter_node_cpu_usage":                     METER_RESOURCE_TYPE_CPU,
	"meter_node_memory_usage_wo_cache":         METER_RESOURCE_TYPE_MEM,
	"meter_node_net_bytes_transmitted":         METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_node_net_bytes_received":            METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_node_pvc_bytes_total":               METER_RESOURCE_TYPE_PVC,
	"meter_workspace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_workspace_memory_usage":             METER_RESOURCE_TYPE_MEM,
	"meter_workspace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workspace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workspace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_namespace_cpu_usage":                METER_RESOURCE_TYPE_CPU,
	"meter_namespace_memory_usage_wo_cache":    METER_RESOURCE_TYPE_MEM,
	"meter_namespace_net_bytes_transmitted":    METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_namespace_net_bytes_received":       METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_namespace_pvc_bytes_total":          METER_RESOURCE_TYPE_PVC,
	"meter_application_cpu_usage":              METER_RESOURCE_TYPE_CPU,
	"meter_application_memory_usage_wo_cache":  METER_RESOURCE_TYPE_MEM,
	"meter_application_net_bytes_transmitted":  METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_application_net_bytes_received":     METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_application_pvc_bytes_total":        METER_RESOURCE_TYPE_PVC,
	"meter_workload_cpu_usage":                 METER_RESOURCE_TYPE_CPU,
	"meter_workload_memory_usage_wo_cache":     METER_RESOURCE_TYPE_MEM,
	"meter_workload_net_bytes_transmitted":     METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_workload_net_bytes_received":        METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_workload_pvc_bytes_total":           METER_RESOURCE_TYPE_PVC,
	"meter_service_cpu_usage":                  METER_RESOURCE_TYPE_CPU,
	"meter_service_memory_usage_wo_cache":      METER_RESOURCE_TYPE_MEM,
	"meter_service_net_bytes_transmitted":      METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_service_net_bytes_received":         METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_cpu_usage":                      METER_RESOURCE_TYPE_CPU,
	"meter_pod_memory_usage_wo_cache":          METER_RESOURCE_TYPE_MEM,
	"meter_pod_net_bytes_transmitted":          METER_RESOURCE_TYPE_NET_EGRESS,
	"meter_pod_net_bytes_received":             METER_RESOURCE_TYPE_NET_INGRESS,
	"meter_pod_pvc_bytes_total":                METER_RESOURCE_TYPE_PVC,
}
|
||||
|
||||
func getMaxPointValue(points []monitoring.Point) string {
|
||||
var max *big.Float
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
max = new(big.Float).SetFloat64(p.Value())
|
||||
}
|
||||
|
||||
pf := new(big.Float).SetFloat64(p.Value())
|
||||
if pf.Cmp(max) == 1 {
|
||||
max = pf
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), max)
|
||||
}
|
||||
|
||||
func getMinPointValue(points []monitoring.Point) string {
|
||||
var min *big.Float
|
||||
for i, p := range points {
|
||||
if i == 0 {
|
||||
min = new(big.Float).SetFloat64(p.Value())
|
||||
}
|
||||
|
||||
pf := new(big.Float).SetFloat64(p.Value())
|
||||
if min.Cmp(pf) == 1 {
|
||||
min = pf
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), min)
|
||||
}
|
||||
|
||||
func getSumPointValue(points []monitoring.Point) string {
|
||||
sum := new(big.Float).SetFloat64(0)
|
||||
|
||||
for _, p := range points {
|
||||
pf := new(big.Float).SetFloat64(p.Value())
|
||||
sum.Add(sum, pf)
|
||||
}
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), sum)
|
||||
}
|
||||
|
||||
func getAvgPointValue(points []monitoring.Point) string {
|
||||
sum, ok := new(big.Float).SetString(getSumPointValue(points))
|
||||
if !ok {
|
||||
klog.Error("failed to parse big.Float")
|
||||
return ""
|
||||
}
|
||||
|
||||
length := new(big.Float).SetFloat64(float64(len(points)))
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringDefaultPrecision), sum.Quo(sum, length))
|
||||
}
|
||||
|
||||
// generateFloatFormat builds a printf verb such as "%.10f" for the
// given number of decimal places.
func generateFloatFormat(precision int) string {
	return fmt.Sprintf("%%.%df", precision)
}
|
||||
|
||||
func getResourceUnit(meterName string) string {
|
||||
if resourceType, ok := MeterResourceMap[meterName]; !ok {
|
||||
klog.Errorf("invlaid meter %v", meterName)
|
||||
return ""
|
||||
} else {
|
||||
return meterResourceUnitMap[resourceType]
|
||||
}
|
||||
}
|
||||
|
||||
func getFeeWithMeterName(meterName string, sum string, priceInfo meteringclient.PriceInfo) string {
|
||||
|
||||
s, ok := new(big.Float).SetString(sum)
|
||||
if !ok {
|
||||
klog.Error("failed to parse string to float")
|
||||
return ""
|
||||
}
|
||||
|
||||
if resourceType, ok := MeterResourceMap[meterName]; !ok {
|
||||
klog.Errorf("invlaid meter %v", meterName)
|
||||
return ""
|
||||
} else {
|
||||
switch resourceType {
|
||||
case METER_RESOURCE_TYPE_CPU:
|
||||
CpuPerCorePerHour := new(big.Float).SetFloat64(priceInfo.CpuPerCorePerHour)
|
||||
tmp := s.Mul(s, CpuPerCorePerHour)
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringFeePrecision), tmp)
|
||||
case METER_RESOURCE_TYPE_MEM:
|
||||
oneGiga := new(big.Float).SetInt64(1073741824)
|
||||
MemPerGigabytesPerHour := new(big.Float).SetFloat64(priceInfo.MemPerGigabytesPerHour)
|
||||
|
||||
// transform unit from bytes to Gigabytes
|
||||
s.Quo(s, oneGiga)
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringFeePrecision), s.Mul(s, MemPerGigabytesPerHour))
|
||||
case METER_RESOURCE_TYPE_NET_INGRESS:
|
||||
oneMega := new(big.Float).SetInt64(1048576)
|
||||
IngressNetworkTrafficPerMegabytesPerHour := new(big.Float).SetFloat64(priceInfo.IngressNetworkTrafficPerMegabytesPerHour)
|
||||
|
||||
// transform unit from bytes to Migabytes
|
||||
s.Quo(s, oneMega)
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringFeePrecision), s.Mul(s, IngressNetworkTrafficPerMegabytesPerHour))
|
||||
case METER_RESOURCE_TYPE_NET_EGRESS:
|
||||
oneMega := new(big.Float).SetInt64(1048576)
|
||||
EgressNetworkTrafficPerMegabytesPerHour := new(big.Float).SetPrec(meteringFeePrecision).SetFloat64(priceInfo.EgressNetworkTrafficPerMegabytesPerHour)
|
||||
|
||||
// transform unit from bytes to Migabytes
|
||||
s.Quo(s, oneMega)
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringFeePrecision), s.Mul(s, EgressNetworkTrafficPerMegabytesPerHour))
|
||||
case METER_RESOURCE_TYPE_PVC:
|
||||
oneGiga := new(big.Float).SetInt64(1073741824)
|
||||
PvcPerGigabytesPerHour := new(big.Float).SetPrec(meteringFeePrecision).SetFloat64(priceInfo.PvcPerGigabytesPerHour)
|
||||
|
||||
// transform unit from bytes to Gigabytes
|
||||
s.Quo(s, oneGiga)
|
||||
|
||||
return fmt.Sprintf(generateFloatFormat(meteringFeePrecision), s.Mul(s, PvcPerGigabytesPerHour))
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// updateMetricStatData enriches one metric's data in place with
// per-series statistics (min/max/avg), squashes matrix series by the
// metric's scaling factor, and attaches the sum, computed fee, currency
// unit and resource unit. Returns the enriched MetricData.
//
// NOTE(review): when scalingMap is non-nil but lacks this metric's
// name, factor becomes 0 and squashPoints returns nil — presumably
// callers always populate the map; confirm against call sites.
func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float64, priceInfo meteringclient.PriceInfo) monitoring.MetricData {
	metricName := metric.MetricName
	metricData := metric.MetricData
	for index, metricValue := range metricData.MetricValues {

		// calculate min, max, avg value first, then squash points with factor
		if metricData.MetricType == monitoring.MetricTypeMatrix {
			metricData.MetricValues[index].MinValue = getMinPointValue(metricValue.Series)
			metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)
			metricData.MetricValues[index].AvgValue = getAvgPointValue(metricValue.Series)
		} else {
			// Vector results carry a single sample; wrap it so the same
			// helpers apply.
			metricData.MetricValues[index].MinValue = getMinPointValue([]monitoring.Point{*metricValue.Sample})
			metricData.MetricValues[index].MaxValue = getMaxPointValue([]monitoring.Point{*metricValue.Sample})
			metricData.MetricValues[index].AvgValue = getAvgPointValue([]monitoring.Point{*metricValue.Sample})
		}

		// squash points if step is more than one hour and calculate sum and fee
		var factor float64 = 1
		if scalingMap != nil {
			factor = scalingMap[metricName]
		}
		metricData.MetricValues[index].Series = squashPoints(metricData.MetricValues[index].Series, int(factor))

		if metricData.MetricType == monitoring.MetricTypeMatrix {
			// Sum over the squashed series.
			sum := getSumPointValue(metricData.MetricValues[index].Series)
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum, priceInfo)
		} else {
			sum := getSumPointValue([]monitoring.Point{*metricValue.Sample})
			metricData.MetricValues[index].SumValue = sum
			metricData.MetricValues[index].Fee = getFeeWithMeterName(metricName, sum, priceInfo)
		}

		metricData.MetricValues[index].CurrencyUnit = priceInfo.CurrencyUnit
		metricData.MetricValues[index].ResourceUnit = getResourceUnit(metricName)

	}
	return metricData
}
|
||||
|
||||
// squashPoints merges every `factor` consecutive points into one. It
// walks the input from the newest point backwards: each group's newest
// point starts a new output entry (prepended, so output stays in
// chronological order) and the older points of the group are folded in
// via Point.Add. factor == 1 returns the points unchanged; a
// non-positive factor is logged and yields nil.
func squashPoints(input []monitoring.Point, factor int) (output []monitoring.Point) {

	if factor <= 0 {
		klog.Errorln("factor should be positive")
		return nil
	}

	for i := 0; i < len(input); i++ {

		if i%factor == 0 {
			// Start a new group headed by this (newest-remaining) point.
			output = append([]monitoring.Point{input[len(input)-1-i]}, output...)
		} else {
			// Accumulate this point into the current group's head.
			output[0] = output[0].Add(input[len(input)-1-i])
		}
	}

	return output
}
|
||||
@@ -1,392 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
meteringclient "kubesphere.io/kubesphere/pkg/simple/client/metering"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
)
|
||||
|
||||
// TestGetMaxPointValue checks the formatted maximum over several point
// sets (each Point literal is {timestamp, value}; the expectations pick
// the larger second element).
func TestGetMaxPointValue(t *testing.T) {
	tests := []struct {
		actualPoints  []monitoring.Point
		expectedValue string
	}{
		{
			actualPoints: []monitoring.Point{
				{1.0, 2.0},
				{3.0, 4.0},
			},
			expectedValue: "4.0000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{2, 1},
				{4, 3.1},
			},
			expectedValue: "3.1000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{5, 100},
				{6, 100000.001},
			},
			expectedValue: "100000.0010000000",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			max := getMaxPointValue(tt.actualPoints)
			if max != tt.expectedValue {
				t.Fatal("max point value caculation is wrong.")
			}
		})
	}
}
|
||||
|
||||
// TestGetMinPointValue checks the formatted minimum over several point
// sets (each Point literal is {timestamp, value}).
func TestGetMinPointValue(t *testing.T) {
	tests := []struct {
		actualPoints  []monitoring.Point
		expectedValue string
	}{
		{
			actualPoints: []monitoring.Point{
				{1.0, 2.0},
				{3.0, 4.0},
			},
			expectedValue: "2.0000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{2, 1},
				{4, 3.1},
			},
			expectedValue: "1.0000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{5, 100},
				{6, 100000.001},
			},
			expectedValue: "100.0000000000",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			max := getMinPointValue(tt.actualPoints)
			if max != tt.expectedValue {
				t.Fatal("min point value caculation is wrong.")
			}
		})
	}
}
|
||||
|
||||
// TestGetSumPointValue checks the formatted sum of point values over
// several point sets (each Point literal is {timestamp, value}).
func TestGetSumPointValue(t *testing.T) {
	tests := []struct {
		actualPoints  []monitoring.Point
		expectedValue string
	}{
		{
			actualPoints: []monitoring.Point{
				{1.0, 2.0},
				{3.0, 4.0},
			},
			expectedValue: "6.0000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{2, 1},
				{4, 3.1},
			},
			expectedValue: "4.1000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{5, 100},
				{6, 100000.001},
			},
			expectedValue: "100100.0010000000",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			max := getSumPointValue(tt.actualPoints)
			if max != tt.expectedValue {
				t.Fatal("sum point value caculation is wrong.")
			}
		})
	}
}
|
||||
|
||||
// TestGetAvgPointValue checks the formatted arithmetic mean of point
// values over several point sets.
func TestGetAvgPointValue(t *testing.T) {
	tests := []struct {
		actualPoints  []monitoring.Point
		expectedValue string
	}{
		{
			actualPoints: []monitoring.Point{
				{1.0, 2.0},
				{3.0, 4.0},
			},
			expectedValue: "3.0000000000",
		},
		{
			actualPoints: []monitoring.Point{
				{2, 1},
				{4, 3.1},
			},
			expectedValue: "2.0500000000",
		},
		{
			actualPoints: []monitoring.Point{
				{5, 100},
				{6, 100000.001},
			},
			expectedValue: "50050.0005000000",
		},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			max := getAvgPointValue(tt.actualPoints)
			if max != tt.expectedValue {
				t.Fatal("avg point value caculattion is wrong.")
			}
		})
	}
}
|
||||
|
||||
// TestGenerateFloatFormat verifies the printf verb built for a
// precision of 10 decimal places.
func TestGenerateFloatFormat(t *testing.T) {
	format := generateFloatFormat(10)
	if format != "%.10f" {
		t.Fatalf("get currency float format failed, %s", format)
	}
}
|
||||
|
||||
// TestGetResourceUnit covers the unknown-meter case (empty result) and
// one known meter mapped to "cores".
func TestGetResourceUnit(t *testing.T) {

	tests := []struct {
		meterName     string
		expectedValue string
	}{
		{
			meterName:     "no-exist",
			expectedValue: "",
		},
		{
			meterName:     "meter_cluster_cpu_usage",
			expectedValue: "cores",
		},
	}
	for _, tt := range tests {
		if getResourceUnit(tt.meterName) != tt.expectedValue {
			t.Fatal("get resource unit failed")
		}
	}

}
|
||||
|
||||
// TestSquashPoints verifies that squashing with factor 1 is the
// identity, and that factor 2 halves the series — summing value pairs
// while each merged point keeps the later timestamp of its pair.
func TestSquashPoints(t *testing.T) {

	tests := []struct {
		input    []monitoring.Point
		factor   int
		expected []monitoring.Point
	}{
		{
			input: []monitoring.Point{
				{1, 1},
				{2, 2},
				{3, 3},
				{4, 4},
				{5, 5},
				{6, 6},
				{7, 7},
				{8, 8},
			},
			factor: 1,
			expected: []monitoring.Point{
				{1, 1},
				{2, 2},
				{3, 3},
				{4, 4},
				{5, 5},
				{6, 6},
				{7, 7},
				{8, 8},
			},
		},
		{
			input: []monitoring.Point{
				{1, 1},
				{2, 2},
				{3, 3},
				{4, 4},
				{5, 5},
				{6, 6},
				{7, 7},
				{8, 8},
			},
			factor: 2,
			expected: []monitoring.Point{
				{2, 3},
				{4, 7},
				{6, 11},
				{8, 15},
			},
		},
	}

	for _, tt := range tests {
		got := squashPoints(tt.input, tt.factor)
		if diff := cmp.Diff(got, tt.expected); diff != "" {
			t.Errorf("%T differ (-got, +want): %s", tt.expected, diff)
		}
	}
}
|
||||
|
||||
// TestGetFeeWithMeterName checks fee computation per resource type:
// 1 core-hour of CPU at price 3 yields "3.000", and a zero sum yields
// "0.000" for every other meter kind.
func TestGetFeeWithMeterName(t *testing.T) {

	priceInfo := meteringclient.PriceInfo{
		IngressNetworkTrafficPerMegabytesPerHour: 1,
		EgressNetworkTrafficPerMegabytesPerHour:  2,
		CpuPerCorePerHour:                        3,
		MemPerGigabytesPerHour:                   4,
		PvcPerGigabytesPerHour:                   5,
		CurrencyUnit:                             "CNY",
	}

	if getFeeWithMeterName("meter_cluster_cpu_usage", "1", priceInfo) != "3.000" {
		t.Error("failed to get fee with meter_cluster_cpu_usage")
		return
	}
	if getFeeWithMeterName("meter_cluster_memory_usage", "0", priceInfo) != "0.000" {
		t.Error("failed to get fee with meter_cluster_memory_usage")
		return
	}
	if getFeeWithMeterName("meter_cluster_net_bytes_transmitted", "0", priceInfo) != "0.000" {
		t.Error("failed to get fee with meter_cluster_net_bytes_transmitted")
		return
	}
	if getFeeWithMeterName("meter_cluster_net_bytes_received", "0", priceInfo) != "0.000" {
		t.Error("failed to get fee with meter_cluster_net_bytes_received")
		return
	}
	if getFeeWithMeterName("meter_cluster_pvc_bytes_total", "0", priceInfo) != "0.000" {
		t.Error("failed to get fee with meter_cluster_pvc_bytes_total")
		return
	}
}
|
||||
|
||||
// TestUpdateMetricStatData exercises both the matrix path (a series of
// points, with an explicit scaling factor of 1) and the vector path (a
// single sample, nil scaling map). The metric name "test" is not in
// MeterResourceMap, so Fee and ResourceUnit stay empty while the
// min/max/avg/sum statistics and currency unit are populated.
func TestUpdateMetricStatData(t *testing.T) {

	priceInfo := meteringclient.PriceInfo{
		IngressNetworkTrafficPerMegabytesPerHour: 1,
		EgressNetworkTrafficPerMegabytesPerHour:  2,
		CpuPerCorePerHour:                        3,
		MemPerGigabytesPerHour:                   4,
		PvcPerGigabytesPerHour:                   5,
		CurrencyUnit:                             "CNY",
	}

	tests := []struct {
		metric     monitoring.Metric
		scalingMap map[string]float64
		expected   monitoring.MetricData
	}{
		{
			metric: monitoring.Metric{
				MetricName: "test",
				MetricData: monitoring.MetricData{
					MetricType: monitoring.MetricTypeMatrix,
					MetricValues: []monitoring.MetricValue{
						{
							Metadata: map[string]string{},
							Series: []monitoring.Point{
								{1, 1},
								{2, 2},
							},
						},
					},
				},
			},
			scalingMap: map[string]float64{
				"test": 1,
			},
			expected: monitoring.MetricData{
				MetricType: monitoring.MetricTypeMatrix,
				MetricValues: []monitoring.MetricValue{
					{
						Metadata: map[string]string{},
						Series: []monitoring.Point{
							{1, 1},
							{2, 2},
						},
						MinValue:     "1.0000000000",
						MaxValue:     "2.0000000000",
						AvgValue:     "1.5000000000",
						SumValue:     "3.0000000000",
						CurrencyUnit: "CNY",
					},
				},
			},
		},
		{
			metric: monitoring.Metric{
				MetricName: "test",
				MetricData: monitoring.MetricData{
					MetricType: monitoring.MetricTypeVector,
					MetricValues: []monitoring.MetricValue{
						{
							Metadata: map[string]string{},
							Sample:   &monitoring.Point{1, 2},
						},
					},
				},
			},
			scalingMap: nil,
			expected: monitoring.MetricData{
				MetricType: monitoring.MetricTypeVector,
				MetricValues: []monitoring.MetricValue{
					{
						Metadata:     map[string]string{},
						Sample:       &monitoring.Point{1, 2},
						MinValue:     "2.0000000000",
						MaxValue:     "2.0000000000",
						AvgValue:     "2.0000000000",
						SumValue:     "2.0000000000",
						CurrencyUnit: "CNY",
					},
				},
			},
		},
	}

	for _, test := range tests {
		got := updateMetricStatData(test.metric, test.scalingMap, priceInfo)
		if diff := cmp.Diff(got, test.expected); diff != "" {
			t.Errorf("%T differ (-got, +want): %s", test.expected, diff)
			return
		}
	}

}
|
||||
@@ -1,742 +0,0 @@
|
||||
// Copyright 2022 The KubeSphere Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
package notification
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
"kubesphere.io/api/notification/v2beta1"
|
||||
"kubesphere.io/api/notification/v2beta2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/resource"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/notification"
|
||||
)
|
||||
|
||||
const (
	// Plural resource names that live in the notification secret
	// namespace rather than being cluster-scoped.
	Secret    = "secrets"
	ConfigMap = "configmaps"
	// VerificationAPIPath is the HTTP path used to verify a
	// config/receiver pair.
	VerificationAPIPath = "/api/v2/verify"

	// Supported notification API versions.
	V2beta1 = "v2beta1"
	V2beta2 = "v2beta2"
)
|
||||
|
||||
// Operator abstracts CRUD operations on notification resources
// (configs, receivers, routers, notification managers, and the
// secrets/configmaps they reference) for both the v2beta1 and v2beta2
// API versions.
type Operator interface {
	// v2beta1 variants; list results are converted back to v2beta1
	// objects when a subresource projection is requested.
	ListV2beta1(user, resource, subresource string, query *query.Query) (*api.ListResult, error)
	GetV2beta1(user, resource, name, subresource string) (runtime.Object, error)
	CreateV2beta1(user, resource string, obj runtime.Object) (runtime.Object, error)
	DeleteV2beta1(user, resource, name string) error
	UpdateV2beta1(user, resource, name string, obj runtime.Object) (runtime.Object, error)

	// v2beta2 (current version) variants.
	List(user, resource, subresource string, query *query.Query) (*api.ListResult, error)
	Get(user, resource, name, subresource string) (runtime.Object, error)
	Create(user, resource string, obj runtime.Object) (runtime.Object, error)
	Delete(user, resource, name string) error
	Update(user, resource, name string, obj runtime.Object) (runtime.Object, error)
	Patch(user, resource, name string, data []byte) (runtime.Object, error)

	// Verify handles a verification request (see VerificationAPIPath).
	Verify(request *restful.Request, response *restful.Response)

	// GetObject returns an empty object for the given resource plural
	// and API version.
	GetObject(resource, version string) runtime.Object
	// IsKnownResource reports whether the resource/version/subresource
	// combination is served by this operator.
	IsKnownResource(resource, version, subresource string) bool
}
|
||||
|
||||
// operator is the default Operator implementation, backed by the
// Kubernetes and KubeSphere clientsets, shared informers, and a generic
// resource getter for list/get operations.
type operator struct {
	k8sClient      kubernetes.Interface
	ksClient       kubesphere.Interface
	informers      informers.InformerFactory
	resourceGetter *resource.ResourceGetter
	options        *notification.Options
}
|
||||
|
||||
// Data pairs a notification config with a receiver — presumably the
// payload posted to the verification endpoint; confirm against the
// Verify implementation.
type Data struct {
	Config   v2beta2.Config   `json:"config"`
	Receiver v2beta2.Receiver `json:"receiver"`
}
|
||||
|
||||
// Result is a status/message response body. NOTE(review): the JSON keys
// are capitalized and "Status" maps to the Code field — presumably
// dictated by the external notification-manager API; confirm before
// changing the tags.
type Result struct {
	Code    int    `json:"Status"`
	Message string `json:"Message"`
}
|
||||
|
||||
func NewOperator(
|
||||
informers informers.InformerFactory,
|
||||
k8sClient kubernetes.Interface,
|
||||
ksClient kubesphere.Interface,
|
||||
options *notification.Options) Operator {
|
||||
|
||||
return &operator{
|
||||
informers: informers,
|
||||
k8sClient: k8sClient,
|
||||
ksClient: ksClient,
|
||||
resourceGetter: resource.NewResourceGetter(informers, nil),
|
||||
options: options,
|
||||
}
|
||||
}
|
||||
|
||||
// ListV2beta1 lists objects of version v2beta1. Only global objects are
// returned when user is empty; otherwise only tenant objects whose
// tenant label matches the user are returned.
func (o *operator) ListV2beta1(user, resource, subresource string, q *query.Query) (*api.ListResult, error) {
	return o.list(user, resource, V2beta1, subresource, q)
}
|
||||
|
||||
// List lists objects of the current version (v2beta2). Only global
// objects are returned when user is empty; otherwise only tenant
// objects whose tenant label matches the user are returned.
func (o *operator) List(user, resource, subresource string, q *query.Query) (*api.ListResult, error) {
	return o.list(user, resource, V2beta2, subresource, q)
}
|
||||
|
||||
// list returns notification objects of the given resource and API version,
// filtered by tenant ownership and optionally narrowed to a single
// subresource (e.g. "email") of configs or receivers.
func (o *operator) list(user, resource, version, subresource string, q *query.Query) (*api.ListResult, error) {

	// Routers and notification managers are cluster-scoped settings;
	// tenant users may not list them.
	if user != "" {
		if resource == v2beta2.ResourcesPluralRouter ||
			resource == v2beta2.ResourcesPluralNotificationManager {
			return nil, errors.NewForbidden(v2beta2.Resource(resource), "",
				fmt.Errorf("tenant can not list %s", resource))
		}
	}

	// Restrict the query to global/default objects (empty user) or to the
	// tenant's own objects.
	q.LabelSelector = o.generateLabelSelector(q, user, resource, version)

	// Managed secrets and configmaps live in the dedicated notification namespace.
	ns := ""
	if resource == Secret || resource == ConfigMap {
		ns = constants.NotificationSecretNamespace
	}

	res, err := o.resourceGetter.List(resource, ns, q)
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	// Subresource filtering only applies to configs and receivers; for
	// everything else the raw listing is returned as-is.
	if subresource == "" ||
		(resource != v2beta2.ResourcesPluralConfig && resource != v2beta2.ResourcesPluralReceiver) {
		return res, nil
	}

	// Keep only items that actually define the requested subresource,
	// converting to the v2beta1 form when that API version was requested.
	results := &api.ListResult{}
	for _, item := range res.Items {
		obj := clean(item, resource, subresource)
		if obj != nil {
			if version == V2beta1 {
				obj = convert(obj)
			}
			results.Items = append(results.Items, obj)
		}
	}
	results.TotalItems = len(results.Items)

	return results, nil
}
|
||||
|
||||
func (o *operator) generateLabelSelector(q *query.Query, user, resource, version string) string {
|
||||
|
||||
if resource == v2beta2.ResourcesPluralNotificationManager {
|
||||
return q.LabelSelector
|
||||
}
|
||||
|
||||
labelSelector := q.LabelSelector
|
||||
if len(labelSelector) > 0 {
|
||||
labelSelector = q.LabelSelector + ","
|
||||
}
|
||||
|
||||
filter := ""
|
||||
// If user is nil, it will list all global object.
|
||||
if user == "" {
|
||||
if isConfig(o.GetObject(resource, version)) {
|
||||
filter = "type=default"
|
||||
} else {
|
||||
filter = "type=global"
|
||||
}
|
||||
} else {
|
||||
// If the user is not nil, only return the object belong to this user.
|
||||
filter = "type=tenant,user=" + user
|
||||
}
|
||||
|
||||
labelSelector = labelSelector + filter
|
||||
return labelSelector
|
||||
}
|
||||
|
||||
// GetV2beta1 get the specified object of version v2beta1, if you want to get a global object, the user must be nil.
|
||||
// If you want to get a tenant object, the user must equal to the tenant specified in labels of the object.
|
||||
func (o *operator) GetV2beta1(user, resource, name, subresource string) (runtime.Object, error) {
|
||||
return o.get(user, resource, V2beta1, name, subresource)
|
||||
}
|
||||
|
||||
// Get the specified object, if you want to get a global object, the user must be nil.
|
||||
// If you want to get a tenant object, the user must equal to the tenant specified in labels of the object.
|
||||
func (o *operator) Get(user, resource, name, subresource string) (runtime.Object, error) {
|
||||
return o.get(user, resource, V2beta2, name, subresource)
|
||||
}
|
||||
|
||||
// get fetches a single object, enforces tenant/global authorization, and
// optionally strips it down to the requested subresource (configs and
// receivers only), converting to v2beta1 when asked.
func (o *operator) get(user, resource, version, name, subresource string) (runtime.Object, error) {

	// Routers and notification managers are cluster-scoped; tenants may not read them.
	if user != "" {
		if resource == v2beta2.ResourcesPluralRouter ||
			resource == v2beta2.ResourcesPluralNotificationManager {
			return nil, errors.NewForbidden(v2beta2.Resource(resource), "",
				fmt.Errorf("tenant can not get %s", resource))
		}
	}

	// Managed secrets and configmaps live in the dedicated notification namespace.
	ns := ""
	if resource == Secret || resource == ConfigMap {
		ns = constants.NotificationSecretNamespace
	}

	obj, err := o.resourceGetter.Get(resource, ns, name)
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	// Reject access unless the object is global (empty user) or owned by the user.
	if err := authorizer(user, obj); err != nil {
		klog.Error(err)
		return nil, err
	}

	// Subresource extraction only applies to configs and receivers.
	if subresource == "" ||
		(resource != v2beta2.ResourcesPluralConfig && resource != v2beta2.ResourcesPluralReceiver) {
		return obj, nil
	}

	// A nil result means the object does not define the requested subresource.
	res := clean(obj, resource, subresource)
	if res == nil {
		return nil, errors.NewNotFound(v2beta1.Resource(obj.GetObjectKind().GroupVersionKind().GroupKind().Kind), name)
	}

	if version == V2beta1 {
		res = convert(res)
	}

	return res, nil
}
|
||||
|
||||
// CreateV2beta1 an object of version v2beta1. A global object will be created if the user is nil.
|
||||
// A tenant object will be created if the user is not nil.
|
||||
func (o *operator) CreateV2beta1(user, resource string, obj runtime.Object) (runtime.Object, error) {
|
||||
return o.create(user, resource, V2beta1, obj)
|
||||
}
|
||||
|
||||
// Create an object. A global object will be created if the user is nil.
|
||||
// A tenant object will be created if the user is not nil.
|
||||
func (o *operator) Create(user, resource string, obj runtime.Object) (runtime.Object, error) {
|
||||
return o.create(user, resource, V2beta2, obj)
|
||||
}
|
||||
|
||||
// create stamps ownership labels onto obj and persists it through the
// clientset matching its resource kind and API version.
func (o *operator) create(user, resource, version string, obj runtime.Object) (runtime.Object, error) {

	// Routers and notification managers are cluster-scoped; tenants may not create them.
	if user != "" {
		if resource == v2beta2.ResourcesPluralRouter ||
			resource == v2beta2.ResourcesPluralNotificationManager {
			return nil, errors.NewForbidden(v2beta2.Resource(resource), "",
				fmt.Errorf("tenant can not create %s", resource))
		}
	}

	// Label the object as global/default or tenant-owned before storing it.
	if err := appendLabel(user, resource, obj); err != nil {
		return nil, err
	}

	// Dispatch to the typed client for the resource kind. Configs and
	// receivers support both API versions; everything else is v2beta2-only.
	switch resource {
	case v2beta2.ResourcesPluralNotificationManager:
		return o.ksClient.NotificationV2beta2().NotificationManagers().Create(context.Background(), obj.(*v2beta2.NotificationManager), v1.CreateOptions{})
	case v2beta2.ResourcesPluralConfig:
		if version == V2beta1 {
			return o.ksClient.NotificationV2beta1().Configs().Create(context.Background(), obj.(*v2beta1.Config), v1.CreateOptions{})
		} else {
			return o.ksClient.NotificationV2beta2().Configs().Create(context.Background(), obj.(*v2beta2.Config), v1.CreateOptions{})
		}
	case v2beta2.ResourcesPluralReceiver:
		if version == V2beta1 {
			return o.ksClient.NotificationV2beta1().Receivers().Create(context.Background(), obj.(*v2beta1.Receiver), v1.CreateOptions{})
		} else {
			return o.ksClient.NotificationV2beta2().Receivers().Create(context.Background(), obj.(*v2beta2.Receiver), v1.CreateOptions{})
		}
	case v2beta2.ResourcesPluralRouter:
		return o.ksClient.NotificationV2beta2().Routers().Create(context.Background(), obj.(*v2beta2.Router), v1.CreateOptions{})
	case v2beta2.ResourcesPluralSilence:
		return o.ksClient.NotificationV2beta2().Silences().Create(context.Background(), obj.(*v2beta2.Silence), v1.CreateOptions{})
	case Secret:
		return o.k8sClient.CoreV1().Secrets(constants.NotificationSecretNamespace).Create(context.Background(), obj.(*corev1.Secret), v1.CreateOptions{})
	case ConfigMap:
		return o.k8sClient.CoreV1().ConfigMaps(constants.NotificationSecretNamespace).Create(context.Background(), obj.(*corev1.ConfigMap), v1.CreateOptions{})
	default:
		// Unknown resource kinds indicate a routing bug upstream.
		return nil, errors.NewInternalError(nil)
	}
}
|
||||
|
||||
// DeleteV2beta1 an object of version v2beta1. A global object will be deleted if the user is nil.
|
||||
// If the user is not nil, a tenant object whose tenant label matches the user will be deleted.
|
||||
func (o *operator) DeleteV2beta1(user, resource, name string) error {
|
||||
return o.delete(user, resource, name)
|
||||
}
|
||||
|
||||
// Delete an object. A global object will be deleted if the user is nil.
|
||||
// If the user is not nil, a tenant object whose tenant label matches the user will be deleted.
|
||||
func (o *operator) Delete(user, resource, name string) error {
|
||||
return o.delete(user, resource, name)
|
||||
}
|
||||
|
||||
// delete removes the named object after verifying it exists and the user is
// authorized to access it. Deletion always goes through the v2beta2 clients.
func (o *operator) delete(user, resource, name string) error {

	// Routers and notification managers are cluster-scoped; tenants may not delete them.
	if user != "" {
		if resource == v2beta2.ResourcesPluralRouter ||
			resource == v2beta2.ResourcesPluralNotificationManager {
			return errors.NewForbidden(v2beta2.Resource(resource), "",
				fmt.Errorf("tenant can not delete %s", resource))
		}
	}

	// Get doubles as an existence and ownership check before deleting.
	if _, err := o.Get(user, resource, name, ""); err != nil {
		klog.Error(err)
		return err
	}

	switch resource {
	case v2beta2.ResourcesPluralNotificationManager:
		return o.ksClient.NotificationV2beta2().NotificationManagers().Delete(context.Background(), name, v1.DeleteOptions{})
	case v2beta2.ResourcesPluralConfig:
		return o.ksClient.NotificationV2beta2().Configs().Delete(context.Background(), name, v1.DeleteOptions{})
	case v2beta2.ResourcesPluralReceiver:
		return o.ksClient.NotificationV2beta2().Receivers().Delete(context.Background(), name, v1.DeleteOptions{})
	case v2beta2.ResourcesPluralRouter:
		return o.ksClient.NotificationV2beta2().Routers().Delete(context.Background(), name, v1.DeleteOptions{})
	case v2beta2.ResourcesPluralSilence:
		return o.ksClient.NotificationV2beta2().Silences().Delete(context.Background(), name, v1.DeleteOptions{})
	case Secret:
		return o.k8sClient.CoreV1().Secrets(constants.NotificationSecretNamespace).Delete(context.Background(), name, v1.DeleteOptions{})
	case ConfigMap:
		return o.k8sClient.CoreV1().ConfigMaps(constants.NotificationSecretNamespace).Delete(context.Background(), name, v1.DeleteOptions{})
	default:
		// Unknown resource kinds indicate a routing bug upstream.
		return errors.NewInternalError(nil)
	}
}
|
||||
|
||||
// UpdateV2beta1 an object of version v2beta1, only a global object will be updated if the user is nil.
|
||||
// If the user is not nil, a tenant object whose tenant label matches the user will be updated.
|
||||
func (o *operator) UpdateV2beta1(user, resource, name string, obj runtime.Object) (runtime.Object, error) {
|
||||
return o.update(user, resource, V2beta1, name, obj)
|
||||
}
|
||||
|
||||
// Update an object, only a global object will be updated if the user is nil.
|
||||
// If the user is not nil, a tenant object whose tenant label matches the user will be updated.
|
||||
func (o *operator) Update(user, resource, name string, obj runtime.Object) (runtime.Object, error) {
|
||||
return o.update(user, resource, V2beta2, name, obj)
|
||||
}
|
||||
|
||||
func (o *operator) update(user, resource, version, name string, obj runtime.Object) (runtime.Object, error) {
|
||||
|
||||
if user != "" {
|
||||
if resource == v2beta2.ResourcesPluralRouter ||
|
||||
resource == v2beta2.ResourcesPluralNotificationManager {
|
||||
return nil, errors.NewForbidden(v2beta2.Resource(resource), "",
|
||||
fmt.Errorf("tenant can not update %s", resource))
|
||||
}
|
||||
}
|
||||
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if accessor.GetName() != name {
|
||||
return nil, fmt.Errorf("incorrcet parameter, resource name is not equal to the name in body")
|
||||
}
|
||||
|
||||
_, err = o.Get(user, resource, name, "")
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := appendLabel(user, resource, obj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch resource {
|
||||
case v2beta2.ResourcesPluralNotificationManager:
|
||||
return o.ksClient.NotificationV2beta2().NotificationManagers().Update(context.Background(), obj.(*v2beta2.NotificationManager), v1.UpdateOptions{})
|
||||
case v2beta2.ResourcesPluralConfig:
|
||||
if version == V2beta1 {
|
||||
return o.ksClient.NotificationV2beta1().Configs().Update(context.Background(), obj.(*v2beta1.Config), v1.UpdateOptions{})
|
||||
} else {
|
||||
return o.ksClient.NotificationV2beta2().Configs().Update(context.Background(), obj.(*v2beta2.Config), v1.UpdateOptions{})
|
||||
}
|
||||
case v2beta2.ResourcesPluralReceiver:
|
||||
if version == V2beta1 {
|
||||
return o.ksClient.NotificationV2beta1().Receivers().Update(context.Background(), obj.(*v2beta1.Receiver), v1.UpdateOptions{})
|
||||
} else {
|
||||
return o.ksClient.NotificationV2beta2().Receivers().Update(context.Background(), obj.(*v2beta2.Receiver), v1.UpdateOptions{})
|
||||
}
|
||||
case v2beta2.ResourcesPluralRouter:
|
||||
return o.ksClient.NotificationV2beta2().Routers().Update(context.Background(), obj.(*v2beta2.Router), v1.UpdateOptions{})
|
||||
case v2beta2.ResourcesPluralSilence:
|
||||
return o.ksClient.NotificationV2beta2().Silences().Update(context.Background(), obj.(*v2beta2.Silence), v1.UpdateOptions{})
|
||||
case Secret:
|
||||
return o.k8sClient.CoreV1().Secrets(constants.NotificationSecretNamespace).Update(context.Background(), obj.(*corev1.Secret), v1.UpdateOptions{})
|
||||
case ConfigMap:
|
||||
return o.k8sClient.CoreV1().ConfigMaps(constants.NotificationSecretNamespace).Update(context.Background(), obj.(*corev1.ConfigMap), v1.UpdateOptions{})
|
||||
default:
|
||||
return nil, errors.NewInternalError(nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Patch applies a JSON merge patch to the named object. Only a global
// object will be patched if the user is empty; otherwise the tenant object
// whose tenant label matches the user is patched. Patching always goes
// through the v2beta2 clients.
func (o *operator) Patch(user, resource, name string, data []byte) (runtime.Object, error) {

	// Routers and notification managers are cluster-scoped; tenants may not patch them.
	if user != "" {
		if resource == v2beta2.ResourcesPluralRouter ||
			resource == v2beta2.ResourcesPluralNotificationManager {
			return nil, errors.NewForbidden(v2beta2.Resource(resource), "",
				fmt.Errorf("tenant can not update %s", resource))
		}
	}

	// Get doubles as an existence and ownership check before patching.
	_, err := o.Get(user, resource, name, "")
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	switch resource {
	case v2beta2.ResourcesPluralNotificationManager:
		return o.ksClient.NotificationV2beta2().NotificationManagers().Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case v2beta2.ResourcesPluralConfig:
		return o.ksClient.NotificationV2beta2().Configs().Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case v2beta2.ResourcesPluralReceiver:
		return o.ksClient.NotificationV2beta2().Receivers().Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case v2beta2.ResourcesPluralRouter:
		return o.ksClient.NotificationV2beta2().Routers().Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case v2beta2.ResourcesPluralSilence:
		return o.ksClient.NotificationV2beta2().Silences().Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case Secret:
		return o.k8sClient.CoreV1().Secrets(constants.NotificationSecretNamespace).Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	case ConfigMap:
		return o.k8sClient.CoreV1().ConfigMaps(constants.NotificationSecretNamespace).Patch(context.Background(), name, types.MergePatchType, data, v1.PatchOptions{})
	default:
		// Unknown resource kinds indicate a routing bug upstream.
		return nil, errors.NewInternalError(nil)
	}
}
|
||||
|
||||
// GetObject returns an empty prototype of the Go type backing the given
// resource name and API version, or nil when the combination is not served
// (e.g. routers and silences do not exist in v2beta1).
func (o *operator) GetObject(resource, version string) runtime.Object {

	switch resource {
	case v2beta2.ResourcesPluralNotificationManager:
		return &v2beta2.NotificationManager{}
	case v2beta2.ResourcesPluralConfig:
		if version == V2beta1 {
			return &v2beta1.Config{}
		} else {
			return &v2beta2.Config{}
		}
	case v2beta2.ResourcesPluralReceiver:
		if version == V2beta1 {
			return &v2beta1.Receiver{}
		} else {
			return &v2beta2.Receiver{}
		}
	case v2beta2.ResourcesPluralRouter:
		// Routers were introduced in v2beta2.
		if version == V2beta1 {
			return nil
		}
		return &v2beta2.Router{}
	case v2beta2.ResourcesPluralSilence:
		// Silences were introduced in v2beta2.
		if version == V2beta1 {
			return nil
		}
		return &v2beta2.Silence{}
	case Secret:
		return &corev1.Secret{}
	case ConfigMap:
		return &corev1.ConfigMap{}
	default:
		return nil
	}
}
|
||||
|
||||
func (o *operator) IsKnownResource(resource, version, subresource string) bool {
|
||||
|
||||
if obj := o.GetObject(resource, version); obj == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
res := false
|
||||
// "" means get all types of the config or receiver.
|
||||
if subresource == "dingtalk" ||
|
||||
subresource == "email" ||
|
||||
subresource == "slack" ||
|
||||
subresource == "webhook" ||
|
||||
subresource == "wechat" ||
|
||||
subresource == "" {
|
||||
res = true
|
||||
}
|
||||
|
||||
if version == V2beta2 && subresource == "feishu" {
|
||||
res = true
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Verify forwards a config/receiver pair to the Notification Manager's
// verification endpoint so the caller can test a notification channel, then
// relays the upstream response back to the client.
func (o *operator) Verify(request *restful.Request, response *restful.Response) {
	// Without a configured endpoint there is nothing to forward to.
	if o.options == nil || len(o.options.Endpoint) == 0 {
		_ = response.WriteAsJson(Result{
			http.StatusInternalServerError,
			"Cannot find Notification Manager endpoint",
		})
		return
	}

	reqBody, err := io.ReadAll(request.Request.Body)
	if err != nil {
		klog.Error(err)
		_ = response.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}

	// Parse only to authorize the receiver; the raw body is forwarded
	// untouched below.
	data := Data{}
	err = json.Unmarshal(reqBody, &data)
	if err != nil {
		_ = response.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}

	receiver := data.Receiver
	user := request.PathParameter("user")

	// The caller must own the receiver, or it must be global for an empty user.
	if err := authorizer(user, &receiver); err != nil {
		klog.Error(err)
		_ = response.WriteHeaderAndEntity(http.StatusBadRequest, err)
		return
	}

	req, err := http.NewRequest("POST", fmt.Sprintf("%s%s", o.options.Endpoint, VerificationAPIPath), bytes.NewReader(reqBody))
	if err != nil {
		klog.Error(err)
		_ = response.WriteHeaderAndEntity(http.StatusInternalServerError, err)
		return
	}
	// Propagate the original headers (auth, content type) to the upstream call.
	req.Header = request.Request.Header

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		klog.Error(err)
		_ = response.WriteHeaderAndEntity(http.StatusInternalServerError, err)
		return
	}
	defer func() {
		_ = resp.Body.Close()
	}()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		klog.Error(err)
		// return 500
		_ = response.WriteHeaderAndEntity(http.StatusInternalServerError, err)
		return
	}

	// Relay the upstream reply verbatim as JSON.
	response.AddHeader(restful.HEADER_ContentType, restful.MIME_JSON)
	response.WriteHeader(http.StatusOK)
	_, _ = response.Write(body)
}
|
||||
|
||||
// Does the user has permission to access this object.
|
||||
func authorizer(user string, obj runtime.Object) error {
|
||||
// If the user is not nil, it must equal to the tenant specified in labels of the object.
|
||||
if user != "" && !isOwner(user, obj) {
|
||||
return errors.NewForbidden(v2beta2.Resource(obj.GetObjectKind().GroupVersionKind().GroupKind().Kind), "",
|
||||
fmt.Errorf("user '%s' is not the owner of object", user))
|
||||
}
|
||||
|
||||
// If the user is nil, the object must be a global object.
|
||||
if user == "" && !isGlobal(obj) {
|
||||
return errors.NewForbidden(v2beta2.Resource(obj.GetObjectKind().GroupVersionKind().GroupKind().Kind), "",
|
||||
fmt.Errorf("object is not a global object"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Is the user equal to the tenant specified in the object labels.
|
||||
func isOwner(user string, obj interface{}) bool {
|
||||
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return false
|
||||
}
|
||||
|
||||
return accessor.GetLabels()["user"] == user && accessor.GetLabels()["type"] == "tenant"
|
||||
}
|
||||
|
||||
func isConfig(obj runtime.Object) bool {
|
||||
switch obj.(type) {
|
||||
case *v2beta1.Config, *v2beta2.Config:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Is the object is a global object.
|
||||
func isGlobal(obj runtime.Object) bool {
|
||||
|
||||
if _, ok := obj.(*v2beta2.NotificationManager); ok {
|
||||
return true
|
||||
}
|
||||
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return false
|
||||
}
|
||||
|
||||
if isConfig(obj) {
|
||||
return accessor.GetLabels()["type"] == "default"
|
||||
} else {
|
||||
return accessor.GetLabels()["type"] == "global"
|
||||
}
|
||||
}
|
||||
|
||||
func appendLabel(user, resource string, obj runtime.Object) error {
|
||||
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
labels := accessor.GetLabels()
|
||||
if labels == nil {
|
||||
labels = make(map[string]string)
|
||||
}
|
||||
|
||||
if resource == Secret || resource == ConfigMap {
|
||||
labels[constants.NotificationManagedLabel] = "true"
|
||||
}
|
||||
|
||||
if user == "" {
|
||||
if isConfig(obj) {
|
||||
labels["type"] = "default"
|
||||
} else {
|
||||
labels["type"] = "global"
|
||||
}
|
||||
} else {
|
||||
labels["type"] = "tenant"
|
||||
labels["user"] = user
|
||||
}
|
||||
|
||||
accessor.SetLabels(labels)
|
||||
return nil
|
||||
}
|
||||
|
||||
// clean returns a copy of a config or receiver containing only the
// requested subresource's section of the spec, or nil when the object does
// not define that subresource (or the subresource name is unknown). Objects
// of other resource kinds are returned unchanged.
func clean(obj interface{}, resource, subresource string) runtime.Object {
	if resource == v2beta2.ResourcesPluralConfig {
		config := obj.(*v2beta2.Config)
		// Copy with an emptied spec, then restore only the requested section.
		newConfig := config.DeepCopy()
		newConfig.Spec = v2beta2.ConfigSpec{}
		switch subresource {
		case "dingtalk":
			newConfig.Spec.DingTalk = config.Spec.DingTalk
		case "email":
			newConfig.Spec.Email = config.Spec.Email
		case "feishu":
			newConfig.Spec.Feishu = config.Spec.Feishu
		case "slack":
			newConfig.Spec.Slack = config.Spec.Slack
		case "webhook":
			newConfig.Spec.Webhook = config.Spec.Webhook
		case "wechat":
			newConfig.Spec.Wechat = config.Spec.Wechat
		default:
			return nil
		}

		// A zero spec means the config does not define this subresource.
		if reflect.ValueOf(newConfig.Spec).IsZero() {
			return nil
		}

		return newConfig
	} else if resource == v2beta2.ResourcesPluralReceiver {
		receiver := obj.(*v2beta2.Receiver)
		// Copy with an emptied spec, then restore only the requested section.
		newReceiver := receiver.DeepCopy()
		newReceiver.Spec = v2beta2.ReceiverSpec{}
		switch subresource {
		case "dingtalk":
			newReceiver.Spec.DingTalk = receiver.Spec.DingTalk
		case "email":
			newReceiver.Spec.Email = receiver.Spec.Email
		case "feishu":
			newReceiver.Spec.Feishu = receiver.Spec.Feishu
		case "slack":
			newReceiver.Spec.Slack = receiver.Spec.Slack
		case "webhook":
			newReceiver.Spec.Webhook = receiver.Spec.Webhook
		case "wechat":
			newReceiver.Spec.Wechat = receiver.Spec.Wechat
		default:
			return nil
		}

		// A zero spec means the receiver does not define this subresource.
		if reflect.ValueOf(newReceiver.Spec).IsZero() {
			return nil
		}

		return newReceiver
	} else {
		return obj.(runtime.Object)
	}
}
|
||||
|
||||
func convert(obj runtime.Object) runtime.Object {
|
||||
switch obj := obj.(type) {
|
||||
case *v2beta2.Config:
|
||||
dst := &v2beta1.Config{}
|
||||
_ = obj.ConvertTo(dst)
|
||||
return dst
|
||||
case *v2beta2.Receiver:
|
||||
dst := &v2beta1.Receiver{}
|
||||
_ = obj.ConvertTo(dst)
|
||||
return dst
|
||||
default:
|
||||
return obj
|
||||
}
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package notification
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakek8s "k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
)
|
||||
|
||||
// TestOperator_List verifies that List returns all global fixture secrets,
// sorted ascending by name, via the fake informer cache.
func TestOperator_List(t *testing.T) {
	o := prepare()
	tests := []struct {
		result      *api.ListResult
		expectError error
	}{
		{
			result: &api.ListResult{
				Items:      []interface{}{secret1, secret2, secret3},
				TotalItems: 3,
			},
		},
	}

	for i, test := range tests {
		result, err := o.List("", "secrets", "", &query.Query{
			SortBy:    query.FieldName,
			Ascending: true,
		})

		if err != nil {
			if !reflect.DeepEqual(err, test.expectError) {
				t.Errorf("got %#v, expected %#v", err, test.expectError)
			}
			continue
		}

		if diff := cmp.Diff(result, test.result); diff != "" {
			t.Errorf("case %d, %s", i, diff)
		}
	}
}
|
||||
|
||||
// TestOperator_Get verifies that Get returns an existing fixture secret and
// a NotFound error for an unknown name.
func TestOperator_Get(t *testing.T) {
	o := prepare()
	tests := []struct {
		result      *corev1.Secret
		name        string
		expectError error
	}{
		{
			result:      secret1,
			name:        secret1.Name,
			expectError: nil,
		},
		{
			name:        "foo4",
			expectError: errors.NewNotFound(corev1.Resource("secret"), "foo4"),
		},
	}

	for _, test := range tests {
		result, err := o.Get("", "secrets", test.name, "")

		if err != nil {
			if !reflect.DeepEqual(err, test.expectError) {
				t.Errorf("got %#v, expected %#v", err, test.expectError)
			}
			continue
		}

		if diff := cmp.Diff(result, test.result); diff != "" {
			t.Error(diff)
		}
	}
}
|
||||
|
||||
// TestOperator_Create verifies that a secret created with an empty user is
// stored in the notification namespace and labeled as a global object.
func TestOperator_Create(t *testing.T) {
	o := prepare()
	tests := []struct {
		result      *corev1.Secret
		secret      *corev1.Secret
		expectError error
	}{
		{
			// Expected stored object: namespace set by the operator.
			result: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test",
					Namespace: constants.NotificationSecretNamespace,
					Labels: map[string]string{
						"type":                             "global",
						constants.NotificationManagedLabel: "true",
					},
				},
			},
			// Input object: no namespace.
			secret: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test",
					Labels: map[string]string{
						"type":                             "global",
						constants.NotificationManagedLabel: "true",
					},
				},
			},
			expectError: nil,
		},
	}

	for i, test := range tests {
		result, err := o.Create("", "secrets", test.secret)

		if err != nil {
			if !reflect.DeepEqual(err, test.expectError) {
				t.Errorf("case %d, got %#v, expected %#v", i, err, test.expectError)
			}
			continue
		}

		if diff := cmp.Diff(result, test.result); diff != "" {
			t.Error(diff)
		}
	}
}
|
||||
|
||||
// TestOperator_Delete verifies that deleting an unknown secret surfaces a
// NotFound error (matched by message because the error instances differ).
func TestOperator_Delete(t *testing.T) {
	o := prepare()
	tests := []struct {
		name        string
		expectError error
	}{
		{
			name:        "foo4",
			expectError: errors.NewNotFound(corev1.Resource("secret"), "foo4"),
		},
	}

	for i, test := range tests {
		err := o.Delete("", "secrets", test.name)
		if err != nil {
			// Compare by message first; fall back to deep equality.
			if test.expectError != nil && test.expectError.Error() == err.Error() {
				continue
			} else {
				if !reflect.DeepEqual(err, test.expectError) {
					t.Errorf("case %d, got %#v, expected %#v", i, err, test.expectError)
				}
			}
		}
	}
}
|
||||
|
||||
// Fixture secrets preloaded into the fake informer cache by prepare().
// All three are global objects in the notification namespace.
var (
	secret1 = &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo1",
			Namespace: constants.NotificationSecretNamespace,
			Labels: map[string]string{
				"type": "global",
			},
		},
	}

	secret2 = &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo2",
			Namespace: constants.NotificationSecretNamespace,
			Labels: map[string]string{
				"type": "global",
			},
		},
	}

	secret3 = &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo3",
			Namespace: constants.NotificationSecretNamespace,
			Labels: map[string]string{
				"type": "global",
			},
		},
	}

	// secrets groups the fixtures for bulk loading in prepare().
	secrets = []*corev1.Secret{secret1, secret2, secret3}
)
|
||||
|
||||
// prepare builds an Operator over fake clientsets with the fixture secrets
// preloaded into the informer indexer, for use by the tests above.
func prepare() Operator {

	ksClient := fakeks.NewSimpleClientset()
	k8sClient := fakek8s.NewSimpleClientset()
	fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil)

	// Seed the cache directly; the fake informers never sync on their own.
	for _, secret := range secrets {
		_ = fakeInformerFactory.KubernetesSharedInformerFactory().Core().V1().Secrets().Informer().GetIndexer().Add(secret)
	}

	return NewOperator(fakeInformerFactory, k8sClient, ksClient, nil)
}
|
||||
@@ -1,14 +0,0 @@
|
||||
approvers:
|
||||
- zheng1
|
||||
- wansir
|
||||
- zryfish
|
||||
|
||||
reviewers:
|
||||
- zheng1
|
||||
- wansir
|
||||
- zryfish
|
||||
- xyz-li
|
||||
|
||||
labels:
|
||||
- area/api
|
||||
- area/app-management
|
||||
@@ -1,628 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
v1alpha13 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
listers_v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
// ApplicationInterface defines the operations exposed for managing
// OpenPitrix helm applications, their versions, reviews and audits.
type ApplicationInterface interface {
	// ListApps returns one page of applications matching the conditions.
	ListApps(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
	// DescribeApp returns the detail of a single application.
	DescribeApp(id string) (*App, error)
	// DeleteApp removes an application; its versions must be deleted first.
	DeleteApp(id string) error
	// CreateApp creates an application together with its first version.
	CreateApp(req *CreateAppRequest) (*CreateAppResponse, error)
	// ModifyApp updates mutable fields (name, category, icon, ...) of an app.
	ModifyApp(appId string, request *ModifyAppRequest) error
	// DeleteAppVersion removes a single, non-active application version.
	DeleteAppVersion(id string) error
	// ModifyAppVersion updates mutable fields of an app version.
	ModifyAppVersion(id string, request *ModifyAppVersionRequest) error
	// DescribeAppVersion returns the detail of a single app version.
	DescribeAppVersion(id string) (*AppVersion, error)
	// CreateAppVersion adds a new version to an existing application.
	CreateAppVersion(request *CreateAppVersionRequest) (*CreateAppVersionResponse, error)
	// ValidatePackage parses a chart package and reports metadata or parse errors.
	ValidatePackage(request *ValidatePackageRequest) (*ValidatePackageResponse, error)
	// GetAppVersionPackage returns the raw chart package of a version.
	GetAppVersionPackage(appId, versionId string) (*GetAppVersionPackageResponse, error)
	// DoAppAction suspends or recovers an app and its versions.
	DoAppAction(appId string, request *ActionRequest) error
	// DoAppVersionAction performs a state-transition action on one version.
	DoAppVersionAction(versionId string, request *ActionRequest) error
	// ListAppVersionAudits lists audit records of app versions.
	ListAppVersionAudits(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
	// GetAppVersionFiles returns the files contained in a version's chart package.
	GetAppVersionFiles(versionId string, request *GetAppVersionFilesRequest) (*GetAppVersionPackageFilesResponse, error)
	// ListAppVersionReviews lists review records of app versions.
	ListAppVersionReviews(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
	// ListAppVersions lists app versions matching the conditions.
	ListAppVersions(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
}
|
||||
|
||||
// applicationOperator implements ApplicationInterface on top of the
// HelmApplication/HelmApplicationVersion CRDs, an S3-compatible backing
// store for chart/icon blobs, and an in-memory cache of helm repos.
type applicationOperator struct {
	// backingStoreClient stores chart packages, icons and attachments.
	// Several methods treat a nil client as "feature disabled".
	backingStoreClient s3.Interface
	informers          externalversions.SharedInformerFactory

	// typed write clients for the application CRDs
	appClient        v1alpha13.HelmApplicationInterface
	appVersionClient v1alpha13.HelmApplicationVersionInterface

	// read-only listers backed by the shared informer cache
	appLister     listers_v1alpha1.HelmApplicationLister
	versionLister listers_v1alpha1.HelmApplicationVersionLister

	repoLister listers_v1alpha1.HelmRepoLister
	ctgLister  listers_v1alpha1.HelmCategoryLister
	rlsLister  listers_v1alpha1.HelmReleaseLister

	// cachedRepos serves apps/versions that come from synced helm repos.
	cachedRepos reposcache.ReposCache
}
|
||||
|
||||
// newApplicationOperator wires an applicationOperator together from the
// shared informer factory, the typed clientset, the repo cache and the
// backing object store. storeClient may be nil, which disables the
// mutating operations that need blob storage.
func newApplicationOperator(cached reposcache.ReposCache, informers externalversions.SharedInformerFactory, ksClient versioned.Interface, storeClient s3.Interface) ApplicationInterface {
	op := &applicationOperator{
		backingStoreClient: storeClient,
		informers:          informers,
		repoLister:         informers.Application().V1alpha1().HelmRepos().Lister(),

		appClient:        ksClient.ApplicationV1alpha1().HelmApplications(),
		appVersionClient: ksClient.ApplicationV1alpha1().HelmApplicationVersions(),

		appLister:     informers.Application().V1alpha1().HelmApplications().Lister(),
		versionLister: informers.Application().V1alpha1().HelmApplicationVersions().Lister(),

		ctgLister:   informers.Application().V1alpha1().HelmCategories().Lister(),
		rlsLister:   informers.Application().V1alpha1().HelmReleases().Lister(),
		cachedRepos: cached,
	}

	return op
}
|
||||
|
||||
// save icon data and helm application
|
||||
func (c *applicationOperator) createApp(app *v1alpha1.HelmApplication, iconData string) (*v1alpha1.HelmApplication, error) {
|
||||
exists, err := c.getHelmAppByName(app.GetWorkspace(), app.GetTrueName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists != nil {
|
||||
return nil, appItemExists
|
||||
}
|
||||
if strings.HasPrefix(iconData, "http://") || strings.HasPrefix(iconData, "https://") {
|
||||
app.Spec.Icon = iconData
|
||||
} else if len(iconData) != 0 {
|
||||
// save icon attachment
|
||||
iconId := idutils.GetUuid(v1alpha1.HelmAttachmentPrefix)
|
||||
decodeString, err := base64.StdEncoding.DecodeString(iconData)
|
||||
if err != nil {
|
||||
klog.Errorf("decodeString icon failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = c.backingStoreClient.Upload(iconId, iconId, bytes.NewBuffer(decodeString), len(iconData))
|
||||
if err != nil {
|
||||
klog.Errorf("save icon attachment failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
app.Spec.Icon = iconId
|
||||
}
|
||||
|
||||
app, err = c.appClient.Create(context.TODO(), app, metav1.CreateOptions{})
|
||||
return app, err
|
||||
}
|
||||
|
||||
// get helm app by name in workspace
|
||||
func (c *applicationOperator) getHelmAppByName(workspace, name string) (*v1alpha1.HelmApplication, error) {
|
||||
ls := map[string]string{
|
||||
constants.WorkspaceLabelKey: workspace,
|
||||
}
|
||||
|
||||
list, err := c.appLister.List(labels.SelectorFromSet(ls))
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(list) > 0 {
|
||||
for _, a := range list {
|
||||
if a.GetTrueName() == name {
|
||||
return a, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) ValidatePackage(request *ValidatePackageRequest) (*ValidatePackageResponse, error) {
|
||||
|
||||
chrt, err := helmrepoindex.LoadPackage(request.VersionPackage)
|
||||
|
||||
result := &ValidatePackageResponse{}
|
||||
|
||||
if err != nil {
|
||||
matchPackageFailedError(err, result)
|
||||
if (result.Error == "EOF" || result.Error == "") && len(result.ErrorDetails) == 0 {
|
||||
klog.Errorf("package parse failed, error: %s", err.Error())
|
||||
return nil, errors.New("package parse failed")
|
||||
}
|
||||
} else {
|
||||
result.Name = chrt.GetName()
|
||||
result.VersionName = chrt.GetVersionName()
|
||||
result.Description = chrt.GetDescription()
|
||||
result.URL = chrt.GetUrls()
|
||||
result.Icon = chrt.GetIcon()
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DoAppAction(appId string, request *ActionRequest) error {
|
||||
|
||||
app, err := c.getHelmApplication(appId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// All the app belonging to a built-in repo have a label `application.kubesphere.io/repo-id`, and the value should be `builtin-stable` or else.
|
||||
if repoId, exist := app.Labels[constants.ChartRepoIdLabelKey]; exist && repoId != v1alpha1.AppStoreRepoId {
|
||||
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplication), app.Name, errors.New("application is immutable"))
|
||||
}
|
||||
|
||||
var filterState string
|
||||
switch request.Action {
|
||||
case ActionSuspend:
|
||||
if app.Status.State != v1alpha1.StateActive {
|
||||
err = actionNotSupport
|
||||
}
|
||||
filterState = v1alpha1.StateActive
|
||||
case ActionRecover:
|
||||
if app.Status.State != v1alpha1.StateSuspended {
|
||||
err = actionNotSupport
|
||||
}
|
||||
filterState = v1alpha1.StateSuspended
|
||||
default:
|
||||
err = actionNotSupport
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var versions []*v1alpha1.HelmApplicationVersion
|
||||
ls := map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: appId,
|
||||
}
|
||||
versions, err = c.versionLister.List(labels.SelectorFromSet(ls))
|
||||
if err != nil {
|
||||
klog.Errorf("get helm app %s version failed, error: %s", appId, err)
|
||||
return err
|
||||
}
|
||||
|
||||
versions = filterAppVersionByState(versions, []string{filterState})
|
||||
for _, version := range versions {
|
||||
err = c.DoAppVersionAction(version.GetHelmApplicationVersionId(), request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateApp creates a helm application plus its first version from an
// uploaded chart package. The chart's description and icon are taken from
// the package; req.Icon (URL or base64 data) overrides the chart icon.
// Requires a configured backing store for the chart/icon blobs.
func (c *applicationOperator) CreateApp(req *CreateAppRequest) (*CreateAppResponse, error) {
	if c.backingStoreClient == nil {
		return nil, invalidS3Config
	}
	chrt, err := helmrepoindex.LoadPackage(req.VersionPackage)
	if err != nil {
		klog.Errorf("load package %s/%s failed, error: %s", req.Isv, req.Name, err)
		return nil, err
	}

	// create helm application
	name := idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix)
	helmApp := &v1alpha1.HelmApplication{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				constants.CreatorAnnotationKey: req.Username,
			},
			Labels: map[string]string{
				constants.WorkspaceLabelKey: req.Isv,
			},
		},
		Spec: v1alpha1.HelmApplicationSpec{
			Name:        req.Name,
			Description: stringutils.ShortenString(chrt.GetDescription(), v1alpha1.MsgLen),
			Icon:        stringutils.ShortenString(chrt.GetIcon(), v1alpha1.MsgLen),
		},
	}
	app, err := c.createApp(helmApp, req.Icon)
	if err != nil {
		klog.Errorf("create helm application %s/%s failed, error: %s", req.Isv, req.Name, err)
		if helmApp.Spec.Icon != "" {
			// Best-effort cleanup of the uploaded icon; the delete error
			// is deliberately ignored since creation already failed.
			c.backingStoreClient.Delete(helmApp.Spec.Icon)
		}
		return nil, err
	} else {
		klog.V(4).Infof("helm application %s/%s created, app id: %s", req.Isv, req.Name, app.Name)
	}

	// create app version
	chartPackage := req.VersionPackage.String()
	ver := buildApplicationVersion(app, chrt, &chartPackage, req.Username)
	ver, err = c.createApplicationVersion(ver)

	if err != nil {
		// NOTE(review): the app object created above is not rolled back
		// when the version creation fails — confirm this is intentional.
		klog.Errorf("create helm application %s/%s versions failed, error: %s", req.Isv, req.Name, err)
		return nil, err
	} else {
		klog.V(4).Infof("helm application version %s/%s created, app version id: %s", req.Isv, req.Name, ver.Name)
	}

	return &CreateAppResponse{
		AppID:     app.GetHelmApplicationId(),
		VersionID: ver.GetHelmApplicationVersionId(),
	}, nil
}
|
||||
|
||||
func buildLabelSelector(conditions *params.Conditions) map[string]string {
|
||||
ls := make(map[string]string)
|
||||
|
||||
repoId := conditions.Match[RepoId]
|
||||
// app store come first
|
||||
if repoId != "" {
|
||||
ls[constants.ChartRepoIdLabelKey] = repoId
|
||||
} else {
|
||||
if conditions.Match[WorkspaceLabel] != "" {
|
||||
ls[constants.WorkspaceLabelKey] = conditions.Match[WorkspaceLabel]
|
||||
}
|
||||
}
|
||||
if conditions.Match[CategoryId] != "" {
|
||||
ls[constants.CategoryIdLabelKey] = conditions.Match[CategoryId]
|
||||
}
|
||||
|
||||
return ls
|
||||
}
|
||||
|
||||
func (c *applicationOperator) ListApps(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
|
||||
apps, err := c.listApps(conditions)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
apps = filterApps(apps, conditions)
|
||||
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(HelmApplicationList(apps)))
|
||||
} else {
|
||||
sort.Sort(HelmApplicationList(apps))
|
||||
}
|
||||
|
||||
totalCount := len(apps)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
apps = apps[start:end]
|
||||
items := make([]interface{}, 0, len(apps))
|
||||
|
||||
for i := range apps {
|
||||
versions, err := c.getAppVersionsByAppId(apps[i].GetHelmApplicationId())
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
ctg, _ := c.ctgLister.Get(apps[i].GetHelmCategoryId())
|
||||
items = append(items, convertApp(apps[i], versions, ctg, 0))
|
||||
}
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DeleteApp(id string) error {
|
||||
app, err := c.appLister.Get(id)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
} else {
|
||||
klog.Errorf("get app %s failed, error: %s", id, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ls := map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: app.GetHelmApplicationId(),
|
||||
}
|
||||
|
||||
list, err := c.versionLister.List(labels.SelectorFromSet(ls))
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
klog.V(4).Infof("versions of app %s has been deleted", id)
|
||||
} else {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
} else if len(list) > 0 {
|
||||
return fmt.Errorf("app %s has some versions not deleted", id)
|
||||
}
|
||||
|
||||
err = c.appClient.Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("delete app %s failed, error: %s", id, err)
|
||||
return err
|
||||
} else {
|
||||
c.deleteAppAttachment(app)
|
||||
klog.V(4).Infof("app %s deleted", app.Name)
|
||||
}
|
||||
|
||||
// delete application in app store
|
||||
id = fmt.Sprintf("%s%s", id, v1alpha1.HelmApplicationAppStoreSuffix)
|
||||
app, err = c.appClient.Get(context.TODO(), id, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
} else {
|
||||
klog.Errorf("get app %s failed, error: %s", id, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// delete application in app store
|
||||
err = c.appClient.Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("delete app %s failed, error: %s", id, err)
|
||||
return err
|
||||
} else {
|
||||
c.deleteAppAttachment(app)
|
||||
klog.V(4).Infof("app %s deleted", app.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModifyApp updates the mutable fields of an application: category label,
// display name, description, abstraction, home URL, and icon/screenshot
// attachments. Apps synced from built-in repos are rejected as immutable.
// Requires a configured backing store for attachment uploads.
func (c *applicationOperator) ModifyApp(appId string, request *ModifyAppRequest) error {
	if c.backingStoreClient == nil {
		return invalidS3Config
	}

	app, err := c.getHelmApplication(appId)
	if err != nil {
		klog.Error(err)
		return err
	}

	// All the app belonging to a built-in repo have a label `application.kubesphere.io/repo-id`, and the value should be `builtin-stable` or else.
	if repoId, exist := app.Labels[constants.ChartRepoIdLabelKey]; exist && repoId != v1alpha1.AppStoreRepoId {
		return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplication), app.Name, errors.New("application is immutable"))
	}

	appCopy := app.DeepCopy()
	// modify category: empty string removes the label, non-empty sets it
	if request.CategoryID != nil {
		if *request.CategoryID == "" {
			delete(appCopy.Labels, constants.CategoryIdLabelKey)
			klog.V(4).Infof("delete app %s category", app.Name)
		} else {
			appCopy.Labels[constants.CategoryIdLabelKey] = *request.CategoryID
			klog.V(4).Infof("set app %s category to %s", app.Name, *request.CategoryID)
		}
	}

	// modify app name; the new name must be unique within the workspace
	if request.Name != nil && len(*request.Name) > 0 && app.GetTrueName() != *request.Name {
		existsApp, err := c.getHelmAppByName(app.GetWorkspace(), *request.Name)
		if err != nil {
			return err
		}
		if existsApp != nil {
			return appItemExists
		}
		klog.V(4).Infof("change app %s name from %s to %s", app.Name, app.GetTrueName(), *request.Name)
		appCopy.Spec.Name = *request.Name
	}

	// save app attachment and icon; `add` is the id of any newly uploaded
	// blob so it can be rolled back if the patch below fails
	add, err := c.modifyAppAttachment(appCopy, request)
	if err != nil {
		klog.Errorf("add app attachment %s failed, error: %s", appCopy.Name, err)
		return err
	}

	if request.Description != nil {
		appCopy.Spec.Description = *request.Description
	}
	if request.Abstraction != nil {
		appCopy.Spec.Abstraction = *request.Abstraction
	}

	if request.Home != nil {
		appCopy.Spec.AppHome = *request.Home
	}
	appCopy.Status.UpdateTime = &metav1.Time{Time: time.Now()}

	// Build a merge patch from the unmodified object to the copy.
	patch := client.MergeFrom(app)
	data, err := patch.Data(appCopy)
	if err != nil {
		klog.Errorf("create patch failed, error: %s", err)
		return err
	}

	// NOTE(review): the status update is issued before the spec patch; if
	// the patch fails, the UpdateTime change is not rolled back — confirm
	// this ordering is intentional.
	_, err = c.appClient.UpdateStatus(context.TODO(), appCopy, metav1.UpdateOptions{})
	if err != nil {
		klog.Errorf("update helm application status: %s failed, error: %s", appId, err)
		return err
	}

	_, err = c.appClient.Patch(context.TODO(), appId, patch.Type(), data, metav1.PatchOptions{})
	if err != nil {
		klog.Errorf("patch helm application: %s failed, error: %s", appId, err)
		if add != "" {
			// if patch failed, delete saved icon or attachment
			c.backingStoreClient.Delete(add)
		}
		return err
	}

	return nil
}
|
||||
|
||||
// deleteAppAttachment removes the icon and all screenshot attachments of
// an app from the backing store. Deletion is best effort: errors from the
// store are deliberately ignored so CR cleanup is never blocked.
func (c *applicationOperator) deleteAppAttachment(app *v1alpha1.HelmApplication) {
	if app.Spec.Icon != "" {
		c.backingStoreClient.Delete(app.Spec.Icon)
	}

	for _, id := range app.Spec.Attachments {
		c.backingStoreClient.Delete(id)
	}
}
|
||||
|
||||
// modifyAppAttachment adds/removes a screenshot or replaces the icon of
// an app, mutating app.Spec in place and uploading/deleting blobs in the
// backing store. It returns the id of any newly uploaded blob so the
// caller can roll it back if persisting the app fails afterwards.
func (c *applicationOperator) modifyAppAttachment(app *v1alpha1.HelmApplication, request *ModifyAppRequest) (add string, err error) {
	if request.Type == nil {
		return "", nil
	}
	switch *request.Type {
	case v1alpha1.AttachmentTypeScreenshot:
		if request.Sequence == nil {
			return "", nil
		}
		seq := *request.Sequence
		attachments := &app.Spec.Attachments
		if len(request.AttachmentContent) == 0 {
			// delete old attachments
			if len(*attachments) > int(seq) {
				del := (*attachments)[seq]
				err = c.backingStoreClient.Delete(del)
				if err != nil {
					return "", err
				} else {
					*attachments = append((*attachments)[:seq], (*attachments)[seq+1:]...)
				}
			}
		} else {
			// NOTE(review): when the app already has 6 screenshots this
			// branch does nothing and control falls through to the upload
			// at the bottom, which overwrites app.Spec.Icon — verify
			// whether a "too many attachments" error was intended instead.
			if len(*attachments) < 6 {
				// add attachment to app
				add := idutils.GetUuid("att-")
				*attachments = append(*attachments, add)
				err = c.backingStoreClient.Upload(add, add, bytes.NewBuffer(request.AttachmentContent), len(request.AttachmentContent))
				if err != nil {
					return "", err
				} else {
					return add, nil
				}
			}
		}
	case v1alpha1.AttachmentTypeIcon: // modify app icon
		// delete old icon
		if app.Spec.Icon != "" {
			err = c.backingStoreClient.Delete(app.Spec.Icon)
			if err != nil {
				return "", err
			}
		}
	}
	// Upload the new icon content (reached by the icon case above).
	if len(request.AttachmentContent) != 0 {
		add := idutils.GetUuid("att-")
		err = c.backingStoreClient.Upload(add, add, bytes.NewBuffer(request.AttachmentContent), len(request.AttachmentContent))
		if err != nil {
			return "", err
		} else {
			app.Spec.Icon = add
			return add, nil
		}
	}
	return "", nil
}
|
||||
|
||||
func (c *applicationOperator) DescribeApp(id string) (*App, error) {
|
||||
var helmApp *v1alpha1.HelmApplication
|
||||
var ctg *v1alpha1.HelmCategory
|
||||
var err error
|
||||
|
||||
helmApp, err = c.getHelmApplication(id)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versions, err := c.getAppVersionsByAppId(helmApp.GetHelmApplicationId())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctg, err = c.ctgLister.Get(helmApp.GetHelmCategoryId())
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app := convertApp(helmApp, versions, ctg, 0)
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// listApps fetches the raw application list used by ListApps.
//
// Three sources are possible depending on the repo id in conditions:
//   - a concrete (non app-store) repo id: apps come from the repo cache;
//   - the app-store repo id: apps from the CRDs plus the built-in repo cache;
//   - no repo id: apps from the CRDs only, filtered by workspace/category.
//
// The CR-backed branches return an empty list when no backing store is
// configured.
func (c *applicationOperator) listApps(conditions *params.Conditions) (ret []*v1alpha1.HelmApplication, err error) {
	repoId := conditions.Match[RepoId]
	if repoId != "" && repoId != v1alpha1.AppStoreRepoId {
		// get helm application from helm repo
		if ret, exists := c.cachedRepos.ListApplicationsInRepo(repoId); !exists {
			klog.Warningf("load repo failed, repo id: %s", repoId)
			return nil, loadRepoInfoFailed
		} else {
			return ret, nil
		}
	} else if repoId == v1alpha1.AppStoreRepoId {
		// List apps in the app-store and built-in repo
		if c.backingStoreClient == nil {
			return []*v1alpha1.HelmApplication{}, nil
		}

		ls := map[string]string{}
		// We just care about the category label when listing apps in built-in repo.
		if conditions.Match[CategoryId] != "" {
			ls[constants.CategoryIdLabelKey] = conditions.Match[CategoryId]
		}
		// NOTE(review): the cache lookup's second return value is ignored
		// here, unlike the branch above — confirm that is intentional.
		appInRepo, _ := c.cachedRepos.ListApplicationsInBuiltinRepo(labels.SelectorFromSet(ls))

		ret, err = c.appLister.List(labels.SelectorFromSet(buildLabelSelector(conditions)))
		ret = append(ret, appInRepo...)
	} else {
		if c.backingStoreClient == nil {
			return []*v1alpha1.HelmApplication{}, nil
		}

		ret, err = c.appLister.List(labels.SelectorFromSet(buildLabelSelector(conditions)))
	}

	return
}
|
||||
|
||||
func (c *applicationOperator) getHelmApplication(appId string) (*v1alpha1.HelmApplication, error) {
|
||||
if app, exists := c.cachedRepos.GetApplication(appId); exists {
|
||||
return app, nil
|
||||
} else {
|
||||
return c.appLister.Get(appId)
|
||||
}
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
fakek8s "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3/fake"
|
||||
)
|
||||
|
||||
var rawChartData = "H4sIFAAAAAAA/ykAK2FIUjBjSE02THk5NWIzVjBkUzVpWlM5Nk9WVjZNV2xqYW5keVRRbz1IZWxtAOxYUW/bNhDOM3/FTVmBNltoubEdQEAfirTAim1pMA/ZwzAUtHSS2FAkS1JOvLT77QNJ2XGUZEmxJukw34NEkcfj3fG+41EOrRsc1Mw4umCN2LoPStM0nYxG4Z2maf+dDif7W8NROhyP0v3R/t5WOnw+nAy3IL0XbXrUWsfMVvqv1+ob9x8hpvkxGsuVzGD+nDCtV59DOpzQlBRoc8O1C30v4QcUDeQ+YKBUBn5sZ2gkOrREsgYz8AFF3EJjBkxrwXPmZ5L5UmpKhzQlj232hjoK+J8z0aK9twRwG/6fD8d9/I+GG/w/CBkMGD1QrXQZDAnhDaswIwAGtbLcKbPIQFZcnhEA3QpxpATPFxm8KQ+VOzJoUToC4FiVQZJ0Ao5aIaaYG3Q2g9//CLnh7RyN4QUGtrIV4krnYzvjf0gB/w4bLZhDO3hXo9BoLHX6y6WCW/C/t7c/6eF/NN6c/w9D5+eDHZjzJgOLDkou0J/dLxrvlrzGDHYGnz4Rz0Ven2kmC3A1gkcuqDK0Qy1ASce3CwWWXCIkPrKoZ0xg92KItcIBjQXnoZdCj+Phs54M4CM408ocJnuhyZtpW5b8DJLdBDpZKAvfjKodGGQOga1W8OllAR9aJnjJsfClSFCakt8wyg78zq/gDbAww5y1FsGqBteqmmhqyVEUFphBELzhDgtwClzNLTydLYIbXh1OPS+XFViN+TNK3pRgUCCznb9yJR3j0nbVU+jjDk65EDBDaK3X0wILynfaXu/VZfK88CwvV47sZ9alw24cv4uzhV3J+TYonr24+25e6LhyQRRCf4n+iXOXel7q/EzltOHSlZA8sbtPbNKTFRe9e2xd37wUcWtb6bHRVbl+G8N2drERuQSbobhpSwPLxX727Vh3cWx3ZTp89Ae1YDlC8l0Cybvk88GjmkbJqJ69Qb04GPWrUTTU1oOgcgbn58BlLtqiZwqNi/UGLQrMnTI/dQLpWnR0lr1c3UH8GNOanqzgSLkarK4S5+fXTPkIH1rlsGfpVSkNk6zCYne2iIKWkTJFM+d5f3701LRT/p991Tdx99r1423pin8irOn1OnNpHZM5XtZ4HTzXxWg/YdvOQpbnvurzmay1eKMxgfll5D28KelcZqN5XLmX9p9eNvUii9FnNwmS67at4XwpMukayZ0EXMHyY5++j0+9+i9XsuRVw/SXvAze+v9nnPbqv3E63tR/D0InXBYZHIRt/5lp0qBjBXPM3wBXKWoZH1eBG/PU2i+kIVnO9qwZ+C8CsEHaV0oB/9Qf6bySyuB9rHEb/sd7V/7/7E3GG/w/BG3DEXMOjbS+DogxAKc1Spi1XBT+OqNZfsIqtJRsw6/+ymNbrZVxFmyNQkAl1Awa5vKay+p7f+dhjs8RNHP1Wj+TBdkGiVX4IQxPtcGSn2EBp9zV8M0zCm+lWICSYaZXCTQaEFwiJfTV9N3UKYNkG7p69fhgCgU3ltCKu0F4RvUJnf1pBuG57KirgX8sP+1cDi4EzVh+0upw97Vkh9pTTXbojJ2QHeoa31aGV2TnL7INx8xw1Vp48+q1JVQb9R5zRygvkA0iu1HvCZ3bXBU42CS9DW1oQ18z/R0AAP//GfF7tgAeAAA="
|
||||
|
||||
// TestOpenPitrixApp walks the whole application lifecycle against fake
// clients: validate a package (good and corrupted), create an app, sync
// the fake objects into the informer indexers, then describe/list the
// app and its versions, fetch version files, and finally delete the
// version and verify an app with versions cannot be deleted first.
func TestOpenPitrixApp(t *testing.T) {
	appOperator := prepareAppOperator()

	chartData, _ := base64.RawStdEncoding.DecodeString(rawChartData)

	validateReq := &ValidatePackageRequest{
		VersionPackage: chartData,
	}
	// validate package
	validateResp, err := appOperator.ValidatePackage(validateReq)
	if err != nil || validateResp.Error != "" {
		klog.Errorf("validate package failed, error: %s", err)
		t.FailNow()
	}

	validateReq = &ValidatePackageRequest{
		VersionPackage: strfmt.Base64(""),
	}

	// validate corrupted package
	_, err = appOperator.ValidatePackage(validateReq)
	if err == nil {
		klog.Errorf("validate package failed, error: %s", err)
		t.FailNow()
	}

	appReq := &CreateAppRequest{
		Isv:            testWorkspace,
		Name:           "test-chart",
		VersionName:    "0.1.0",
		VersionPackage: strfmt.Base64(chartData),
	}

	// create app
	createAppResp, err := appOperator.CreateApp(appReq)
	if err != nil {
		klog.Errorf("create app failed")
		t.Fail()
	}

	// add app to indexer (fake clients don't feed informer caches, so the
	// lister-backed reads below need the objects added manually)
	apps, _ := ksClient.ApplicationV1alpha1().HelmApplications().List(context.TODO(), metav1.ListOptions{})
	for _, app := range apps.Items {
		err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmApplications().
			Informer().GetIndexer().Add(&app)
		if err != nil {
			klog.Errorf("failed to add app to indexer")
			t.FailNow()
		}
	}

	// add app version to indexer
	appvers, _ := ksClient.ApplicationV1alpha1().HelmApplicationVersions().List(context.TODO(), metav1.ListOptions{})
	for _, ver := range appvers.Items {
		err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmApplicationVersions().
			Informer().GetIndexer().Add(&ver)
		if err != nil {
			klog.Errorf("failed to add app version to indexer")
			t.Fail()
		}
	}

	// describe app
	app, err := appOperator.DescribeApp(createAppResp.AppID)
	if err != nil {
		klog.Errorf("describe app failed, err: %s", err)
		t.FailNow()
	}
	_ = app

	cond := &params.Conditions{Match: map[string]string{
		WorkspaceLabel: testWorkspace,
	}}
	// list apps
	listApps, err := appOperator.ListApps(cond, "", false, 10, 0)
	if err != nil {
		klog.Errorf("list app failed")
		t.FailNow()
	}
	_ = listApps

	// describe app
	describeAppVersion, err := appOperator.DescribeAppVersion(createAppResp.VersionID)
	if err != nil {
		klog.Errorf("describe app version failed, error: %s", err)
		t.FailNow()
	}
	_ = describeAppVersion

	cond.Match[AppId] = createAppResp.AppID
	// list app version
	_, err = appOperator.ListAppVersions(cond, "", false, 10, 0)
	if err != nil {
		klog.Errorf("list app version failed")
		t.FailNow()
	}

	// get app version file
	getAppVersionFilesRequest := &GetAppVersionFilesRequest{}
	_, err = appOperator.GetAppVersionFiles(createAppResp.VersionID, getAppVersionFilesRequest)

	if err != nil {
		klog.Errorf("get app version files failed")
		t.FailNow()
	}

	// delete app: must fail while versions still exist
	err = appOperator.DeleteApp(createAppResp.AppID)

	if err == nil {
		klog.Errorf("we should delete application version first")
		t.FailNow()
	}

	// delete the app version, which must succeed
	err = appOperator.DeleteAppVersion(createAppResp.VersionID)

	if err != nil {
		klog.Errorf("delete application version failed, err: %s", err)
		t.FailNow()
	}

}
|
||||
|
||||
// Shared fixtures for the openpitrix tests; populated by prepareAppOperator.
var (
	ksClient            versioned.Interface
	k8sClient           kubernetes.Interface
	fakeInformerFactory informers.InformerFactory
	testWorkspace       = "test-workspace"
)
|
||||
|
||||
// prepareAppOperator builds an ApplicationInterface backed by fake
// clientsets, fake informers and an in-memory S3 store, storing the fakes
// in package globals so tests can inspect and seed them.
func prepareAppOperator() ApplicationInterface {
	ksClient = fakeks.NewSimpleClientset()
	k8sClient = fakek8s.NewSimpleClientset()
	fakeInformerFactory = informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil)

	return newApplicationOperator(reposcache.NewReposCache(), fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient, fake.NewFakeS3())
}
|
||||
@@ -1,593 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"helm.sh/helm/v3/pkg/chart/loader"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/go-openapi/strfmt"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
func (c *applicationOperator) GetAppVersionPackage(appId, versionId string) (*GetAppVersionPackageResponse, error) {
|
||||
var version *v1alpha1.HelmApplicationVersion
|
||||
var err error
|
||||
|
||||
version, err = c.getAppVersionByVersionIdWithData(versionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &GetAppVersionPackageResponse{
|
||||
AppId: appId,
|
||||
VersionId: versionId,
|
||||
Package: version.Spec.Data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// check helm package and create helm app version if not exist
|
||||
func (c *applicationOperator) CreateAppVersion(request *CreateAppVersionRequest) (*CreateAppVersionResponse, error) {
|
||||
if c.backingStoreClient == nil {
|
||||
return nil, invalidS3Config
|
||||
}
|
||||
|
||||
chrt, err := helmrepoindex.LoadPackage(request.Package)
|
||||
if err != nil {
|
||||
klog.Errorf("load package failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app, err := c.appLister.Get(request.AppId)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("get app %s failed, error: %s", request.AppId, err)
|
||||
return nil, err
|
||||
}
|
||||
chartPackage := request.Package.String()
|
||||
version := buildApplicationVersion(app, chrt, &chartPackage, request.Username)
|
||||
version, err = c.createApplicationVersion(version)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("create helm app version failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
klog.V(4).Infof("create helm app version %s success", request.Name)
|
||||
|
||||
return &CreateAppVersionResponse{
|
||||
VersionId: version.GetHelmApplicationVersionId(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DeleteAppVersion(id string) error {
|
||||
appVersion, err := c.versionLister.Get(id)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
} else {
|
||||
klog.Infof("get app version %s failed, error: %s", id, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch appVersion.Status.State {
|
||||
case v1alpha1.StateActive:
|
||||
klog.Warningf("delete app version %s/%s not permitted, current state:%s", appVersion.GetWorkspace(),
|
||||
appVersion.GetTrueName(), appVersion.Status.State)
|
||||
return actionNotPermitted
|
||||
}
|
||||
|
||||
// Delete data in storage
|
||||
err = c.backingStoreClient.Delete(dataKeyInStorage(appVersion.GetWorkspace(), id))
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok && aerr.Code() != s3.ErrCodeNoSuchKey {
|
||||
klog.Errorf("delete app version %s/%s data failed, error: %s", appVersion.GetWorkspace(), appVersion.Name, err)
|
||||
return deleteDataInStorageFailed
|
||||
}
|
||||
}
|
||||
|
||||
// delete app version in etcd
|
||||
err = c.appVersionClient.Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Errorf("delete app version %s failed", err)
|
||||
return err
|
||||
} else {
|
||||
klog.Infof("app version %s deleted", id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DescribeAppVersion(id string) (*AppVersion, error) {
|
||||
version, err := c.getAppVersion(id)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version [%s] failed, error: %s", id, err)
|
||||
return nil, err
|
||||
}
|
||||
app := convertAppVersion(version)
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// ModifyAppVersion updates a draft application version, either by replacing
// its chart package or by patching selected metadata fields.
//
// With a non-empty request.Package the version must still be in draft state:
// the chart is parsed, its name must match the existing spec, a renamed
// version must not collide with an existing one, the spec is refreshed from
// chart metadata, and the raw package overwrites the stored chart data.
// Without a package only the version name and description may change.
// Changes are applied as a merge patch; an empty patch is skipped.
func (c *applicationOperator) ModifyAppVersion(id string, request *ModifyAppVersionRequest) error {

	version, err := c.getAppVersion(id)
	if err != nil {
		klog.Errorf("get app version [%s] failed, error: %s", id, err)
		return err
	}

	// All the app versions belonging to a built-in repo have a label `application.kubesphere.io/repo-id`, and the value should be `builtin-stable` or else.
	// Versions mirrored from external repos are immutable here.
	if repoId, exists := version.Labels[constants.ChartRepoIdLabelKey]; exists && repoId != v1alpha1.AppStoreRepoId {
		return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplicationVersion), version.Name, errors.New("version is immutable"))
	}

	// Mutate a deep copy so a merge patch can be computed against the
	// original object below.
	versionCopy := version.DeepCopy()
	spec := &versionCopy.Spec

	// extract information from chart package
	if len(request.Package) > 0 {
		if version.Status.State != v1alpha1.StateDraft {
			return actionNotPermitted
		}
		// 1. Parse the chart package
		chart, err := helmrepoindex.LoadPackage(request.Package)
		if err != nil {
			klog.Errorf("load package failed, error: %s", err)
			return err
		}

		// chart name must match with the original one
		if spec.Name != chart.GetName() {
			return fmt.Errorf("chart name not match, current name: %s, original name: %s", chart.GetName(), spec.Name)
		}

		// new version name: reject when another version of this app
		// already uses the chart's version name.
		if chart.GetVersionName() != version.GetVersionName() {
			existsVersion, err := c.getAppVersionByVersionName(version.GetHelmApplicationId(), chart.GetVersionName())
			if err != nil {
				return err
			}
			if existsVersion != nil {
				return appVersionItemExists
			}
		}

		// 2. update crd info
		spec.Version = chart.GetVersion()
		spec.AppVersion = chart.GetAppVersion()
		spec.Icon = chart.GetIcon()
		spec.Home = chart.GetHome()
		spec.Description = stringutils.ShortenString(chart.GetDescription(), v1alpha1.MsgLen)

		now := metav1.Now()
		spec.Created = &now

		// 3. save chart data to s3 storage, just overwrite the legacy data
		err = c.backingStoreClient.Upload(dataKeyInStorage(versionCopy.GetWorkspace(), versionCopy.Name), versionCopy.Name, bytes.NewBuffer(request.Package), len(request.Package))
		if err != nil {
			klog.Errorf("upload chart for app version: %s/%s failed, error: %s", versionCopy.GetWorkspace(),
				versionCopy.GetTrueName(), err)
			return uploadChartDataFailed
		} else {
			klog.V(4).Infof("chart data uploaded for app version: %s/%s", versionCopy.GetWorkspace(), versionCopy.GetTrueName())
		}
	} else {
		// new version name
		if request.Name != nil && *request.Name != "" && version.GetVersionName() != *request.Name {
			spec.Version, spec.AppVersion = parseChartVersionName(*request.Name)
			existsVersion, err := c.getAppVersionByVersionName(version.GetHelmApplicationId(), *request.Name)
			if err != nil {
				return err
			}
			if existsVersion != nil {
				return appVersionItemExists
			}
		}

		if request.Description != nil && *request.Description != "" {
			spec.Description = stringutils.ShortenString(*request.Description, v1alpha1.MsgLen)
		}
	}

	patch := client.MergeFrom(version)
	data, err := patch.Data(versionCopy)
	if err != nil {
		klog.Error("create patch failed", err)
		return err
	}

	// data == "{}", need not to patch
	if len(data) == 2 {
		return nil
	}

	_, err = c.appVersionClient.Patch(context.TODO(), id, patch.Type(), data, metav1.PatchOptions{})

	if err != nil {
		klog.Error(err)
		return err
	}
	return nil
}
|
||||
|
||||
func (c *applicationOperator) ListAppVersions(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
versions, err := c.getAppVersionsByAppId(conditions.Match[AppId])
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versions = filterAppVersions(versions, conditions)
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(AppVersions(versions)))
|
||||
} else {
|
||||
sort.Sort(AppVersions(versions))
|
||||
}
|
||||
|
||||
totalCount := len(versions)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
versions = versions[start:end]
|
||||
items := make([]interface{}, 0, len(versions))
|
||||
for i := range versions {
|
||||
items = append(items, convertAppVersion(versions[i]))
|
||||
}
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
}
|
||||
|
||||
// ListAppVersionReviews lists versions pending (or having gone through)
// review, filtered by the given conditions, sorted and paginated, with each
// entry enriched by its parent application's metadata.
func (c *applicationOperator) ListAppVersionReviews(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {

	// Reviews span all applications, so start from every known version.
	appVersions, err := c.versionLister.List(labels.Everything())
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	filtered := filterAppReviews(appVersions, conditions)
	if reverse {
		sort.Sort(sort.Reverse(AppVersionReviews(filtered)))
	} else {
		sort.Sort(AppVersionReviews(filtered))
	}

	totalCount := len(filtered)
	start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
	filtered = filtered[start:end]
	items := make([]interface{}, 0, len(filtered))
	for i := range filtered {
		// Each review row needs the parent app for display fields.
		app, err := c.appLister.Get(filtered[i].GetHelmApplicationId())
		if err != nil {
			return nil, err
		}
		review := convertAppVersionReview(app, filtered[i])
		items = append(items, review)
	}

	return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
}
|
||||
|
||||
func (c *applicationOperator) ListAppVersionAudits(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
appId := conditions.Match[AppId]
|
||||
versionId := conditions.Match[VersionId]
|
||||
|
||||
var versions []*v1alpha1.HelmApplicationVersion
|
||||
var err error
|
||||
if versionId == "" {
|
||||
ls := map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: appId,
|
||||
}
|
||||
versions, err = c.versionLister.List(labels.SelectorFromSet(ls))
|
||||
if err != nil {
|
||||
klog.Errorf("get app %s failed, error: %s", appId, err)
|
||||
}
|
||||
} else {
|
||||
version, err := c.versionLister.Get(versionId)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version %s failed, error: %s", versionId, err)
|
||||
}
|
||||
versions = []*v1alpha1.HelmApplicationVersion{version}
|
||||
}
|
||||
|
||||
var allAudits []*AppVersionAudit
|
||||
for _, item := range versions {
|
||||
audits := convertAppVersionAudit(item)
|
||||
allAudits = append(allAudits, audits...)
|
||||
}
|
||||
|
||||
sort.Sort(AppVersionAuditList(allAudits))
|
||||
|
||||
totalCount := len(allAudits)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
allAudits = allAudits[start:end]
|
||||
items := make([]interface{}, 0, len(allAudits))
|
||||
|
||||
for i := range allAudits {
|
||||
items = append(items, allAudits[i])
|
||||
}
|
||||
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DoAppVersionAction(versionId string, request *ActionRequest) error {
|
||||
var err error
|
||||
t := metav1.Now()
|
||||
var audit = v1alpha1.Audit{
|
||||
Message: request.Message,
|
||||
Operator: request.Username,
|
||||
Time: t,
|
||||
}
|
||||
state := v1alpha1.StateDraft
|
||||
|
||||
version, err := c.getAppVersion(versionId)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version %s failed, error: %s", versionId, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// All the app versions belonging to a built-in repo have a label `application.kubesphere.io/repo-id`, and the value should be `builtin-stable` or else.
|
||||
if repoId, exists := version.Labels[constants.ChartRepoIdLabelKey]; exists && repoId != v1alpha1.AppStoreRepoId {
|
||||
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplicationVersion), version.Name, errors.New("version is immutable"))
|
||||
}
|
||||
|
||||
switch request.Action {
|
||||
case ActionCancel:
|
||||
state = v1alpha1.StateDraft
|
||||
audit.State = v1alpha1.StateDraft
|
||||
case ActionPass:
|
||||
state = v1alpha1.StatePassed
|
||||
audit.State = v1alpha1.StatePassed
|
||||
case ActionRecover:
|
||||
state = v1alpha1.StateActive
|
||||
audit.State = v1alpha1.StateActive
|
||||
case ActionReject:
|
||||
// todo check status
|
||||
state = v1alpha1.StateRejected
|
||||
audit.State = v1alpha1.StateRejected
|
||||
case ActionSubmit:
|
||||
// todo check status
|
||||
state = v1alpha1.StateSubmitted
|
||||
audit.State = v1alpha1.StateSubmitted
|
||||
case ActionSuspend:
|
||||
state = v1alpha1.StateSuspended
|
||||
audit.State = v1alpha1.StateSuspended
|
||||
case ActionRelease:
|
||||
// release to app store
|
||||
state = v1alpha1.StateActive
|
||||
audit.State = v1alpha1.StateActive
|
||||
default:
|
||||
err = errors.New("action not support")
|
||||
}
|
||||
|
||||
_ = state
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
version, err = c.updateAppVersionStatus(version, state, &audit)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("update app version audit [%s] failed, error: %s", versionId, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if request.Action == ActionRelease || request.Action == ActionRecover {
|
||||
// if we release a new helm application version, we need update the spec in helm application copy
|
||||
app, err := c.appLister.Get(version.GetHelmApplicationId())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
appInStore, err := c.appLister.Get(fmt.Sprintf("%s%s", version.GetHelmApplicationId(), v1alpha1.HelmApplicationAppStoreSuffix))
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// controller-manager will create application in app store
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(&app.Spec, &appInStore.Spec) || !reflect.DeepEqual(app.Labels[constants.CategoryIdLabelKey], appInStore.Labels[constants.CategoryIdLabelKey]) {
|
||||
appCopy := appInStore.DeepCopy()
|
||||
appCopy.Labels[constants.CategoryIdLabelKey] = app.Labels[constants.CategoryIdLabelKey]
|
||||
appCopy.Spec = app.Spec
|
||||
patch := client.MergeFrom(appInStore)
|
||||
data, _ := patch.Data(appCopy)
|
||||
_, err = c.appClient.Patch(context.TODO(), appCopy.Name, patch.Type(), data, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) getAppVersionByVersionName(appId, verName string) (*v1alpha1.HelmApplicationVersion, error) {
|
||||
ls := map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: appId,
|
||||
}
|
||||
|
||||
versions, err := c.versionLister.List(labels.SelectorFromSet(ls))
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, ver := range versions {
|
||||
if verName == ver.GetVersionName() {
|
||||
return ver, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Create helmApplicationVersion and helmAudit
|
||||
func (c *applicationOperator) createApplicationVersion(ver *v1alpha1.HelmApplicationVersion) (*v1alpha1.HelmApplicationVersion, error) {
|
||||
existsVersion, err := c.getAppVersionByVersionName(ver.GetHelmApplicationId(), ver.GetVersionName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if existsVersion != nil {
|
||||
klog.V(2).Infof("helm application version: %s exist", ver.GetVersionName())
|
||||
return nil, appVersionItemExists
|
||||
}
|
||||
|
||||
// save chart data to s3 storage
|
||||
_, err = base64.StdEncoding.Decode(ver.Spec.Data, ver.Spec.Data)
|
||||
if err != nil {
|
||||
klog.Errorf("decode error: %s", err)
|
||||
return nil, err
|
||||
} else {
|
||||
err = c.backingStoreClient.Upload(dataKeyInStorage(ver.GetWorkspace(), ver.Name), ver.Name, bytes.NewBuffer(ver.Spec.Data), len(ver.Spec.Data))
|
||||
if err != nil {
|
||||
klog.Errorf("upload chart for app version: %s/%s failed, error: %s", ver.GetWorkspace(),
|
||||
ver.GetTrueName(), err)
|
||||
return nil, uploadChartDataFailed
|
||||
} else {
|
||||
klog.V(4).Infof("chart data uploaded for app version: %s/%s", ver.GetWorkspace(), ver.GetTrueName())
|
||||
}
|
||||
}
|
||||
|
||||
// data will not save to etcd
|
||||
ver.Spec.Data = nil
|
||||
ver.Spec.DataKey = ver.Name
|
||||
version, err := c.appVersionClient.Create(context.TODO(), ver, metav1.CreateOptions{})
|
||||
if err == nil {
|
||||
klog.V(4).Infof("create helm application %s version success", version.Name)
|
||||
}
|
||||
|
||||
return version, err
|
||||
}
|
||||
|
||||
func (c *applicationOperator) updateAppVersionStatus(version *v1alpha1.HelmApplicationVersion, state string, status *v1alpha1.Audit) (*v1alpha1.HelmApplicationVersion, error) {
|
||||
version.Status.State = state
|
||||
|
||||
states := append([]v1alpha1.Audit{*status}, version.Status.Audit...)
|
||||
if len(version.Status.Audit) >= v1alpha1.HelmRepoSyncStateLen {
|
||||
// strip the last item
|
||||
states = states[:v1alpha1.HelmRepoSyncStateLen:v1alpha1.HelmRepoSyncStateLen]
|
||||
}
|
||||
|
||||
version.Status.Audit = states
|
||||
version, err := c.appVersionClient.UpdateStatus(context.TODO(), version, metav1.UpdateOptions{})
|
||||
|
||||
return version, err
|
||||
}
|
||||
|
||||
func (c *applicationOperator) GetAppVersionFiles(versionId string, request *GetAppVersionFilesRequest) (*GetAppVersionPackageFilesResponse, error) {
|
||||
var version *v1alpha1.HelmApplicationVersion
|
||||
var err error
|
||||
|
||||
// get chart data
|
||||
version, err = c.getAppVersionByVersionIdWithData(versionId)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version %s chart data failed: %v", versionId, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// parse chart data
|
||||
chartData, err := loader.LoadArchive(bytes.NewReader(version.Spec.Data))
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to load package for app version: %s, error: %+v", versionId, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := &GetAppVersionPackageFilesResponse{Files: map[string]strfmt.Base64{}, VersionId: versionId}
|
||||
for _, f := range chartData.Raw {
|
||||
res.Files[f.Name] = f.Data
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) getAppVersionByVersionIdWithData(versionId string) (*v1alpha1.HelmApplicationVersion, error) {
|
||||
if version, exists, err := c.cachedRepos.GetAppVersionWithData(versionId); exists {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
version, err := c.versionLister.Get(versionId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, err := c.backingStoreClient.Read(dataKeyInStorage(version.GetWorkspace(), versionId))
|
||||
if err != nil {
|
||||
klog.Errorf("load chart data for app version: %s/%s failed, error : %s", version.GetTrueName(),
|
||||
version.GetTrueName(), err)
|
||||
return nil, downloadFileFailed
|
||||
}
|
||||
version.Spec.Data = data
|
||||
|
||||
return version, nil
|
||||
}
|
||||
|
||||
// getAppVersionsByAppId returns every version of the given application,
// preferring the in-memory repo cache over the shared-informer lister.
func (c *applicationOperator) getAppVersionsByAppId(appId string) (ret []*v1alpha1.HelmApplicationVersion, err error) {
	// Fast path: built-in repo versions come from the cache.
	// NOTE(review): this `ret` deliberately shadows the named return value;
	// the explicit `return ret, nil` inside makes it behave as intended.
	if ret, exists := c.cachedRepos.ListAppVersionsByAppId(appId); exists {
		return ret, nil
	}

	// list app version from client-go
	ret, err = c.versionLister.List(labels.SelectorFromSet(map[string]string{constants.ChartApplicationIdLabelKey: appId}))
	// NotFound just means the app has no versions; report other errors.
	if err != nil && !apierrors.IsNotFound(err) {
		klog.Error(err)
		return nil, err
	}

	return
}
|
||||
|
||||
// get app version from repo and helm application
|
||||
func (c *applicationOperator) getAppVersion(id string) (ret *v1alpha1.HelmApplicationVersion, err error) {
|
||||
if ver, exists, _ := c.cachedRepos.GetAppVersion(id); exists {
|
||||
return ver, nil
|
||||
}
|
||||
|
||||
ret, err = c.versionLister.Get(id)
|
||||
return
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
// /*
|
||||
// Copyright 2020 The KubeSphere Authors.
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// */
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
)
|
||||
|
||||
// AttachmentInterface manages binary attachments (e.g. app screenshots)
// stored in the S3-compatible backing store, keyed by generated ids.
type AttachmentInterface interface {
	// DescribeAttachment reads the raw content stored under the given id.
	DescribeAttachment(id string) (*Attachment, error)
	// CreateAttachment stores data under a newly generated attachment id.
	CreateAttachment(data []byte) (*Attachment, error)
	// DeleteAttachments removes the listed attachments, stopping at the
	// first failure.
	DeleteAttachments(ids []string) error
}
|
||||
|
||||
// attachmentOperator is the default AttachmentInterface implementation,
// backed by an S3-compatible object store. A nil client means attachment
// storage is not configured; methods return invalidS3Config in that case.
type attachmentOperator struct {
	backingStoreClient s3.Interface
}
|
||||
|
||||
func newAttachmentOperator(storeClient s3.Interface) AttachmentInterface {
|
||||
return &attachmentOperator{
|
||||
backingStoreClient: storeClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *attachmentOperator) DescribeAttachment(id string) (*Attachment, error) {
|
||||
if c.backingStoreClient == nil {
|
||||
return nil, invalidS3Config
|
||||
}
|
||||
data, err := c.backingStoreClient.Read(id)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("read attachment %s failed, error: %s", id, err)
|
||||
return nil, downloadFileFailed
|
||||
}
|
||||
|
||||
att := &Attachment{AttachmentID: id,
|
||||
AttachmentContent: map[string]strfmt.Base64{
|
||||
"raw": data,
|
||||
},
|
||||
}
|
||||
|
||||
return att, nil
|
||||
}
|
||||
func (c *attachmentOperator) CreateAttachment(data []byte) (*Attachment, error) {
|
||||
if c.backingStoreClient == nil {
|
||||
return nil, invalidS3Config
|
||||
}
|
||||
id := idutils.GetUuid36(v1alpha1.HelmAttachmentPrefix)
|
||||
|
||||
err := c.backingStoreClient.Upload(id, id, bytes.NewBuffer(data), len(data))
|
||||
if err != nil {
|
||||
klog.Errorf("upload attachment failed, err: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
klog.V(4).Infof("upload attachment success")
|
||||
|
||||
att := &Attachment{AttachmentID: id}
|
||||
return att, nil
|
||||
}
|
||||
|
||||
func (c *attachmentOperator) DeleteAttachments(ids []string) error {
|
||||
if c.backingStoreClient == nil {
|
||||
return invalidS3Config
|
||||
}
|
||||
for _, id := range ids {
|
||||
err := c.backingStoreClient.Delete(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,209 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
typed_v1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
listers_v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
)
|
||||
|
||||
// CategoryInterface manages app-store categories (HelmCategory CRDs).
type CategoryInterface interface {
	// CreateCategory creates a category; the name must be unique.
	CreateCategory(request *CreateCategoryRequest) (*CreateCategoryResponse, error)
	// DeleteCategory deletes a category; fails while it still owns apps.
	DeleteCategory(id string) error
	// ModifyCategory patches a category's name and/or description.
	ModifyCategory(id string, request *ModifyCategoryRequest) error
	// ListCategories returns all categories, sorted and paginated.
	ListCategories(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
	// DescribeCategory returns one category by id.
	DescribeCategory(id string) (*Category, error)
}
|
||||
|
||||
// categoryOperator is the default CategoryInterface implementation. Writes
// go through ctgClient; reads use the informer-backed ctgLister; repoCache
// supplies per-category app counts for built-in repos.
type categoryOperator struct {
	ctgClient typed_v1alpha1.ApplicationV1alpha1Interface
	ctgLister listers_v1alpha1.HelmCategoryLister
	repoCache reposcache.ReposCache
}
|
||||
|
||||
func newCategoryOperator(repoCache reposcache.ReposCache, ksFactory externalversions.SharedInformerFactory, ksClient versioned.Interface) CategoryInterface {
|
||||
c := &categoryOperator{
|
||||
ctgClient: ksClient.ApplicationV1alpha1(),
|
||||
ctgLister: ksFactory.Application().V1alpha1().HelmCategories().Lister(),
|
||||
repoCache: repoCache,
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *categoryOperator) getCategoryByName(name string) (*v1alpha1.HelmCategory, error) {
|
||||
ctgs, err := c.ctgLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, ctg := range ctgs {
|
||||
if name == ctg.Spec.Name {
|
||||
return ctg, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *categoryOperator) createCategory(name, desc string) (*v1alpha1.HelmCategory, error) {
|
||||
ctg := &v1alpha1.HelmCategory{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmCategoryIdPrefix),
|
||||
},
|
||||
Spec: v1alpha1.HelmCategorySpec{
|
||||
Description: desc,
|
||||
Name: name,
|
||||
},
|
||||
}
|
||||
|
||||
return c.ctgClient.HelmCategories().Create(context.TODO(), ctg, metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
func (c *categoryOperator) CreateCategory(request *CreateCategoryRequest) (*CreateCategoryResponse, error) {
|
||||
|
||||
ctg, err := c.getCategoryByName(request.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ctg != nil {
|
||||
return nil, errors.New("category %s exists", ctg.Spec.Name)
|
||||
}
|
||||
|
||||
ctg, err = c.createCategory(request.Name, request.Description)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &CreateCategoryResponse{
|
||||
CategoryId: ctg.Name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *categoryOperator) DeleteCategory(id string) error {
|
||||
ctg, err := c.ctgClient.HelmCategories().Get(context.TODO(), id, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if ctg.Status.Total > 0 {
|
||||
return errors.New("category %s owns application", ctg.Spec.Name)
|
||||
}
|
||||
|
||||
err = c.ctgClient.HelmCategories().Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *categoryOperator) ModifyCategory(id string, request *ModifyCategoryRequest) error {
|
||||
|
||||
ctg, err := c.ctgClient.HelmCategories().Get(context.TODO(), id, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return errors.New("category %s not found", id)
|
||||
}
|
||||
ctgCopy := ctg.DeepCopy()
|
||||
|
||||
if request.Name != nil {
|
||||
ctgCopy.Spec.Name = *request.Name
|
||||
}
|
||||
|
||||
if request.Description != nil {
|
||||
ctgCopy.Spec.Description = *request.Description
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(ctg)
|
||||
data, err := patch.Data(ctgCopy)
|
||||
if err != nil {
|
||||
klog.Error("create patch failed", err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = c.ctgClient.HelmCategories().Patch(context.TODO(), id, patch.Type(), data, metav1.PatchOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *categoryOperator) DescribeCategory(id string) (*Category, error) {
|
||||
var err error
|
||||
var ctg *v1alpha1.HelmCategory
|
||||
ctg, err = c.ctgClient.HelmCategories().Get(context.TODO(), id, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return convertCategory(ctg), nil
|
||||
}
|
||||
|
||||
// ListCategories lists every category, sorted and paginated, with each
// category's app count merged from two sources (see comment in the loop).
func (c *categoryOperator) ListCategories(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {

	ctgs, err := c.ctgLister.List(labels.Everything())

	if err != nil {
		return nil, err
	}

	sort.Sort(HelmCategoryList(ctgs))

	totalCount := len(ctgs)
	start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
	ctgs = ctgs[start:end]
	items := make([]interface{}, 0, len(ctgs))

	ctgCountsOfBuiltinRepo := c.repoCache.CopyCategoryCount()
	for i := range ctgs {
		convertedCtg := convertCategory(ctgs[i])
		// The statistic of category for app in etcd is stored in the crd.
		// The statistic of category for the app in the built-in repo is stored in the memory.
		// So we should calculate these two value then return.
		*convertedCtg.AppTotal += ctgCountsOfBuiltinRepo[convertedCtg.CategoryID]
		items = append(items, convertedCtg)
	}

	return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
}
|
||||
@@ -1,88 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakek8s "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
)
|
||||
|
||||
func TestOpenPitrixCategory(t *testing.T) {
|
||||
ctgOperator := prepareCategoryOperator()
|
||||
|
||||
ctgReq := &CreateCategoryRequest{
|
||||
Name: "test-ctg",
|
||||
}
|
||||
|
||||
// create category
|
||||
ctgResp, err := ctgOperator.CreateCategory(ctgReq)
|
||||
if err != nil {
|
||||
klog.Errorf("create category failed")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// add category to indexer
|
||||
ctgs, _ := ksClient.ApplicationV1alpha1().HelmCategories().List(context.TODO(), metav1.ListOptions{})
|
||||
for _, ctg := range ctgs.Items {
|
||||
err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmCategories().
|
||||
Informer().GetIndexer().Add(&ctg)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to add category to indexer")
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// describe category
|
||||
cond := ¶ms.Conditions{}
|
||||
ctgList, err := ctgOperator.ListCategories(cond, "", false, 10, 0)
|
||||
if err != nil {
|
||||
klog.Errorf("list app failed, err: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
if len(ctgList.Items) != 1 {
|
||||
klog.Errorf("list app failed")
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// describe category
|
||||
ctg, err := ctgOperator.DescribeCategory(ctgResp.CategoryId)
|
||||
if err != nil {
|
||||
klog.Errorf("describe app failed, err: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
_ = ctg
|
||||
|
||||
}
|
||||
|
||||
// prepareCategoryOperator wires a CategoryInterface to fake kubernetes and
// kubesphere clientsets. It (re)assigns the package-level ksClient, k8sClient
// and fakeInformerFactory test fixtures as a side effect so tests can reach
// the underlying fakes directly.
func prepareCategoryOperator() CategoryInterface {
	ksClient = fakeks.NewSimpleClientset()
	k8sClient = fakek8s.NewSimpleClientset()
	fakeInformerFactory = informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil)

	return newCategoryOperator(reposcache.NewReposCache(), fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient)
}
|
||||
@@ -1,33 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
uploadChartDataFailed = errors.New("upload chart data failed")
|
||||
invalidS3Config = errors.New("invalid storage config")
|
||||
deleteDataInStorageFailed = errors.New("delete data in storage failed")
|
||||
repoItemExists = errors.New("repo exists")
|
||||
appItemExists = errors.New("application exists")
|
||||
appVersionItemExists = errors.New("application version exists")
|
||||
actionNotSupport = errors.New("action not support")
|
||||
actionNotPermitted = errors.New("action not permitted")
|
||||
|
||||
loadRepoInfoFailed = errors.New("load repo info failed")
|
||||
downloadFileFailed = errors.New("download file failed")
|
||||
)
|
||||
@@ -1,87 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
ks_informers "kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/s3"
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
AttachmentInterface
|
||||
ApplicationInterface
|
||||
RepoInterface
|
||||
ReleaseInterface
|
||||
CategoryInterface
|
||||
}
|
||||
|
||||
type openpitrixOperator struct {
|
||||
AttachmentInterface
|
||||
ApplicationInterface
|
||||
RepoInterface
|
||||
ReleaseInterface
|
||||
CategoryInterface
|
||||
}
|
||||
|
||||
func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface, cc clusterclient.ClusterClients) Interface {
|
||||
klog.Infof("start helm repo informer")
|
||||
cachedReposData := reposcache.NewReposCache()
|
||||
helmReposInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().Informer()
|
||||
helmReposInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
r := obj.(*v1alpha1.HelmRepo)
|
||||
cachedReposData.AddRepo(r)
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
oldRepo := oldObj.(*v1alpha1.HelmRepo)
|
||||
newRepo := newObj.(*v1alpha1.HelmRepo)
|
||||
cachedReposData.UpdateRepo(oldRepo, newRepo)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
r := obj.(*v1alpha1.HelmRepo)
|
||||
cachedReposData.DeleteRepo(r)
|
||||
},
|
||||
})
|
||||
|
||||
ctgInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmCategories().Informer()
|
||||
ctgInformer.AddIndexers(map[string]cache.IndexFunc{
|
||||
reposcache.CategoryIndexer: func(obj interface{}) ([]string, error) {
|
||||
ctg, _ := obj.(*v1alpha1.HelmCategory)
|
||||
return []string{ctg.Spec.Name}, nil
|
||||
},
|
||||
})
|
||||
indexer := ctgInformer.GetIndexer()
|
||||
|
||||
cachedReposData.SetCategoryIndexer(indexer)
|
||||
|
||||
return &openpitrixOperator{
|
||||
AttachmentInterface: newAttachmentOperator(s3Client),
|
||||
ApplicationInterface: newApplicationOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient, s3Client),
|
||||
RepoInterface: newRepoOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient),
|
||||
ReleaseInterface: newReleaseOperator(cachedReposData, ksInformers.KubernetesSharedInformerFactory(), ksInformers.KubeSphereSharedInformerFactory(), ksClient, cc),
|
||||
CategoryInterface: newCategoryOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient),
|
||||
}
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
appTemplateCreationCounter = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Name: "application_template_creation",
|
||||
Help: "Counter of application template creation broken out for each workspace, name and create state",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"workspace", "name", "state"},
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
metrics.MustRegister(appTemplateCreationCounter)
|
||||
}
|
||||
@@ -1,420 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
typed_v1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
listers_v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmwrapper"
|
||||
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
"kubesphere.io/kubesphere/pkg/utils/resourceparse"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
type ReleaseInterface interface {
|
||||
ListApplications(workspace, clusterName, namespace string, conditions *params.Conditions, limit, offset int, orderBy string, reverse bool) (*models.PageableResponse, error)
|
||||
DescribeApplication(workspace, clusterName, namespace, applicationId string) (*Application, error)
|
||||
CreateApplication(workspace, clusterName, namespace string, request CreateClusterRequest) error
|
||||
ModifyApplication(request ModifyClusterAttributesRequest) error
|
||||
DeleteApplication(workspace, clusterName, namespace, id string) error
|
||||
UpgradeApplication(request UpgradeClusterRequest, applicationId string) error
|
||||
}
|
||||
|
||||
type releaseOperator struct {
|
||||
informers informers.SharedInformerFactory
|
||||
rlsClient typed_v1alpha1.HelmReleaseInterface
|
||||
rlsLister listers_v1alpha1.HelmReleaseLister
|
||||
appVersionLister listers_v1alpha1.HelmApplicationVersionLister
|
||||
cachedRepos reposcache.ReposCache
|
||||
clusterClients clusterclient.ClusterClients
|
||||
}
|
||||
|
||||
func newReleaseOperator(cached reposcache.ReposCache, k8sFactory informers.SharedInformerFactory, ksFactory externalversions.SharedInformerFactory, ksClient versioned.Interface, cc clusterclient.ClusterClients) ReleaseInterface {
|
||||
c := &releaseOperator{
|
||||
informers: k8sFactory,
|
||||
rlsClient: ksClient.ApplicationV1alpha1().HelmReleases(),
|
||||
rlsLister: ksFactory.Application().V1alpha1().HelmReleases().Lister(),
|
||||
cachedRepos: cached,
|
||||
clusterClients: cc,
|
||||
appVersionLister: ksFactory.Application().V1alpha1().HelmApplicationVersions().Lister(),
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
type Application struct {
|
||||
Name string `json:"name" description:"application name"`
|
||||
Cluster *Cluster `json:"cluster,omitempty" description:"application cluster info"`
|
||||
Version *AppVersion `json:"version,omitempty" description:"application template version info"`
|
||||
App *App `json:"app,omitempty" description:"application template info"`
|
||||
|
||||
ReleaseInfo []runtime.Object `json:"releaseInfo,omitempty" description:"release info"`
|
||||
}
|
||||
|
||||
func (c *releaseOperator) UpgradeApplication(request UpgradeClusterRequest, applicationId string) error {
|
||||
oldRls, err := c.rlsLister.Get(applicationId)
|
||||
// todo check namespace
|
||||
if err != nil {
|
||||
klog.Errorf("get release %s/%s failed, error: %s", request.Namespace, applicationId, err)
|
||||
return err
|
||||
}
|
||||
|
||||
switch oldRls.Status.State {
|
||||
case v1alpha1.StateActive, v1alpha1.HelmStatusUpgraded, v1alpha1.HelmStatusCreated, v1alpha1.HelmStatusFailed:
|
||||
// no operation
|
||||
default:
|
||||
return errors.New("can not upgrade application now")
|
||||
}
|
||||
|
||||
version, err := c.getAppVersion("", request.VersionId)
|
||||
if err != nil {
|
||||
klog.Errorf("get helm application version %s/%s failed, error: %s", request.AppId, request.VersionId, err)
|
||||
return err
|
||||
}
|
||||
|
||||
newRls := oldRls.DeepCopy()
|
||||
newRls.Spec.ApplicationId = request.AppId
|
||||
newRls.Spec.ApplicationVersionId = request.VersionId
|
||||
|
||||
newRls.Spec.Version += 1
|
||||
newRls.Spec.RepoId = version.GetHelmRepoId()
|
||||
newRls.Spec.ChartVersion = version.GetChartVersion()
|
||||
newRls.Spec.ChartAppVersion = version.GetChartAppVersion()
|
||||
// Use the new conf if the client has one, or server will just use the old conf.
|
||||
if request.Conf != "" {
|
||||
newRls.Spec.Values = strfmt.Base64(request.Conf)
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(oldRls)
|
||||
data, _ := patch.Data(newRls)
|
||||
|
||||
_, err = c.rlsClient.Patch(context.TODO(), applicationId, patch.Type(), data, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("patch release %s/%s failed, error: %s", request.Namespace, applicationId, err)
|
||||
return err
|
||||
} else {
|
||||
klog.V(2).Infof("patch release %s/%s success", request.Namespace, applicationId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// create all helm release in host cluster
|
||||
func (c *releaseOperator) CreateApplication(workspace, clusterName, namespace string, request CreateClusterRequest) error {
|
||||
version, err := c.getAppVersion("", request.VersionId)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("get helm application version %s failed, error: %v", request.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
exists, err := c.releaseExists(workspace, clusterName, namespace, request.Name)
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("get helm release %s failed, error: %v", request.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
err = fmt.Errorf("release %s exists", request.Name)
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
rls := &v1alpha1.HelmRelease{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmReleasePrefix),
|
||||
Annotations: map[string]string{
|
||||
constants.CreatorAnnotationKey: request.Username,
|
||||
},
|
||||
Labels: map[string]string{
|
||||
constants.ChartApplicationVersionIdLabelKey: request.VersionId,
|
||||
constants.ChartApplicationIdLabelKey: strings.TrimSuffix(request.AppId, v1alpha1.HelmApplicationAppStoreSuffix),
|
||||
constants.WorkspaceLabelKey: request.Workspace,
|
||||
constants.NamespaceLabelKey: namespace,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.HelmReleaseSpec{
|
||||
Name: request.Name,
|
||||
Description: stringutils.ShortenString(request.Description, v1alpha1.MsgLen),
|
||||
Version: 1,
|
||||
Values: strfmt.Base64(request.Conf),
|
||||
ApplicationId: strings.TrimSuffix(request.AppId, v1alpha1.HelmApplicationAppStoreSuffix),
|
||||
ApplicationVersionId: request.VersionId,
|
||||
ChartName: version.GetTrueName(),
|
||||
RepoId: version.GetHelmRepoId(),
|
||||
ChartVersion: version.GetChartVersion(),
|
||||
ChartAppVersion: version.GetChartAppVersion(),
|
||||
},
|
||||
}
|
||||
|
||||
if clusterName != "" {
|
||||
rls.Labels[constants.ClusterNameLabelKey] = clusterName
|
||||
}
|
||||
|
||||
if repoId := version.GetHelmRepoId(); repoId != "" {
|
||||
rls.Labels[constants.ChartRepoIdLabelKey] = repoId
|
||||
}
|
||||
|
||||
_, err = c.rlsClient.Create(context.TODO(), rls, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
} else {
|
||||
klog.Infof("create helm release %s success in %s", request.Name, namespace)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) releaseExists(workspace, clusterName, namespace, name string) (bool, error) {
|
||||
set := map[string]string{
|
||||
constants.WorkspaceLabelKey: workspace,
|
||||
constants.NamespaceLabelKey: namespace,
|
||||
}
|
||||
if clusterName != "" {
|
||||
set[constants.ClusterNameLabelKey] = clusterName
|
||||
}
|
||||
|
||||
list, err := c.rlsLister.List(labels.SelectorFromSet(set))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, rls := range list {
|
||||
if rls.Spec.Name == name {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) ModifyApplication(request ModifyClusterAttributesRequest) error {
|
||||
|
||||
if request.Description == nil || len(*request.Description) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
rls, err := c.rlsLister.Get(request.ClusterID)
|
||||
if err != nil {
|
||||
klog.Errorf("get release failed, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
rlsCopy := rls.DeepCopy()
|
||||
rlsCopy.Spec.Description = stringutils.ShortenString(strings.TrimSpace(*request.Description), v1alpha1.MsgLen)
|
||||
|
||||
pt := client.MergeFrom(rls)
|
||||
|
||||
data, err := pt.Data(rlsCopy)
|
||||
if err != nil {
|
||||
klog.Errorf("create patch failed, error: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = c.rlsClient.Patch(context.TODO(), request.ClusterID, pt.Type(), data, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) ListApplications(workspace, clusterName, namespace string, conditions *params.Conditions, limit, offset int, orderBy string, reverse bool) (*models.PageableResponse, error) {
|
||||
appId := conditions.Match[AppId]
|
||||
versionId := conditions.Match[VersionId]
|
||||
ls := map[string]string{}
|
||||
if appId != "" {
|
||||
ls[constants.ChartApplicationIdLabelKey] = strings.TrimSuffix(appId, v1alpha1.HelmApplicationAppStoreSuffix)
|
||||
}
|
||||
|
||||
if versionId != "" {
|
||||
ls[constants.ChartApplicationVersionIdLabelKey] = versionId
|
||||
}
|
||||
|
||||
repoId := conditions.Match[RepoId]
|
||||
if repoId != "" {
|
||||
ls[constants.ChartRepoIdLabelKey] = repoId
|
||||
}
|
||||
|
||||
if workspace != "" {
|
||||
ls[constants.WorkspaceLabelKey] = workspace
|
||||
}
|
||||
if namespace != "" {
|
||||
ls[constants.NamespaceLabelKey] = namespace
|
||||
}
|
||||
if clusterName != "" {
|
||||
ls[constants.ClusterNameLabelKey] = clusterName
|
||||
}
|
||||
|
||||
releases, err := c.rlsLister.List(labels.SelectorFromSet(ls))
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list app release failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
releases = filterReleases(releases, conditions)
|
||||
|
||||
// only show release whose app versions are active or suspended
|
||||
if versionId == "" && strings.HasSuffix(appId, v1alpha1.HelmApplicationAppStoreSuffix) {
|
||||
stripId := strings.TrimSuffix(appId, v1alpha1.HelmApplicationAppStoreSuffix)
|
||||
versions, err := c.appVersionLister.List(labels.SelectorFromSet(map[string]string{constants.ChartApplicationIdLabelKey: stripId}))
|
||||
if err != nil {
|
||||
klog.Errorf("list app version failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
versions = filterAppVersionByState(versions, []string{v1alpha1.StateActive, v1alpha1.StateSuspended})
|
||||
versionMap := make(map[string]*v1alpha1.HelmApplicationVersion)
|
||||
for _, version := range versions {
|
||||
versionMap[version.Name] = version
|
||||
}
|
||||
releases = filterReleasesWithAppVersions(releases, versionMap)
|
||||
}
|
||||
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(HelmReleaseList(releases)))
|
||||
} else {
|
||||
sort.Sort(HelmReleaseList(releases))
|
||||
}
|
||||
|
||||
totalCount := len(releases)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
releases = releases[start:end]
|
||||
items := make([]interface{}, 0, len(releases))
|
||||
for i := range releases {
|
||||
app := convertApplication(releases[i], nil)
|
||||
items = append(items, app)
|
||||
}
|
||||
|
||||
return &models.PageableResponse{TotalCount: totalCount, Items: items}, nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) DescribeApplication(workspace, clusterName, namespace, applicationId string) (*Application, error) {
|
||||
|
||||
rls, err := c.rlsLister.Get(applicationId)
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list helm release failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
app := &Application{}
|
||||
|
||||
var clusterConfig string
|
||||
if rls != nil {
|
||||
// TODO check clusterName, workspace, namespace
|
||||
if clusterName != "" {
|
||||
cluster, err := c.clusterClients.Get(clusterName)
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster config failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
if !c.clusterClients.IsHostCluster(cluster) {
|
||||
clusterConfig, err = c.clusterClients.GetClusterKubeconfig(rls.GetRlsCluster())
|
||||
if err != nil {
|
||||
klog.Errorf("get cluster config failed, error: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If clusterConfig is empty, this application will be installed in current host.
|
||||
hw := helmwrapper.NewHelmWrapper(clusterConfig, namespace, rls.Spec.Name)
|
||||
manifest, err := hw.Manifest()
|
||||
if err != nil {
|
||||
klog.Errorf("get manifest failed, error: %s", err)
|
||||
}
|
||||
infos, err := resourceparse.Parse(bytes.NewBufferString(manifest), namespace, rls.Spec.Name, true)
|
||||
if err != nil {
|
||||
klog.Errorf("parse resource failed, error: %s", err)
|
||||
}
|
||||
app = convertApplication(rls, infos)
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) DeleteApplication(workspace, clusterName, namespace, id string) error {
|
||||
|
||||
_, err := c.rlsLister.Get(id)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
klog.Errorf("get release %s/%s failed, err: %s", namespace, id, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO, check workspace, cluster and namespace
|
||||
|
||||
err = c.rlsClient.Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("delete release %s/%s failed, error: %s", namespace, id, err)
|
||||
return err
|
||||
} else {
|
||||
klog.V(2).Infof("delete release %s/%s", namespace, id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// get app version from repo and helm application
|
||||
func (c *releaseOperator) getAppVersion(repoId, id string) (ret *v1alpha1.HelmApplicationVersion, err error) {
|
||||
if ver, exists, _ := c.cachedRepos.GetAppVersion(id); exists {
|
||||
return ver, nil
|
||||
}
|
||||
|
||||
if repoId != "" && repoId != v1alpha1.AppStoreRepoId {
|
||||
return nil, fmt.Errorf("app version not found")
|
||||
}
|
||||
ret, err = c.appVersionLister.Get(id)
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
)
|
||||
|
||||
func TestOpenPitrixRelease(t *testing.T) {
|
||||
appOperator := prepareAppOperator()
|
||||
|
||||
chartData, _ := base64.RawStdEncoding.DecodeString(rawChartData)
|
||||
|
||||
appReq := &CreateAppRequest{
|
||||
Isv: testWorkspace,
|
||||
Name: "test-chart",
|
||||
VersionName: "0.1.0",
|
||||
VersionPackage: strfmt.Base64(chartData),
|
||||
}
|
||||
|
||||
// create app
|
||||
createAppResp, err := appOperator.CreateApp(appReq)
|
||||
if err != nil {
|
||||
klog.Errorf("create app failed")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// add app to indexer
|
||||
apps, _ := ksClient.ApplicationV1alpha1().HelmApplications().List(context.TODO(), metav1.ListOptions{})
|
||||
for _, app := range apps.Items {
|
||||
err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmApplications().
|
||||
Informer().GetIndexer().Add(&app)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to add app to indexer")
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// add app version to indexer
|
||||
appvers, _ := ksClient.ApplicationV1alpha1().HelmApplicationVersions().List(context.TODO(), metav1.ListOptions{})
|
||||
for _, ver := range appvers.Items {
|
||||
err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmApplicationVersions().
|
||||
Informer().GetIndexer().Add(&ver)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to add app version to indexer")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
rlsOperator := newReleaseOperator(reposcache.NewReposCache(), fakeInformerFactory.KubernetesSharedInformerFactory(), fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient, nil)
|
||||
|
||||
req := CreateClusterRequest{
|
||||
Name: "test-rls",
|
||||
AppId: createAppResp.AppID,
|
||||
VersionId: createAppResp.VersionID,
|
||||
Workspace: testWorkspace,
|
||||
}
|
||||
err = rlsOperator.CreateApplication(testWorkspace, "", "default", req)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("create release failed, error: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// add app version to indexer
|
||||
rls, _ := ksClient.ApplicationV1alpha1().HelmReleases().List(context.TODO(), metav1.ListOptions{})
|
||||
for _, item := range rls.Items {
|
||||
err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmReleases().
|
||||
Informer().GetIndexer().Add(&item)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to add release to indexer")
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
cond := ¶ms.Conditions{Match: map[string]string{
|
||||
WorkspaceLabel: testWorkspace,
|
||||
}}
|
||||
rlsList, err := rlsOperator.ListApplications(testWorkspace, "", "default", cond, 10, 0, "", false)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("failed to list release, error: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
var rlsId string
|
||||
for _, item := range rlsList.Items {
|
||||
app := item.(*Application)
|
||||
rlsId = app.Cluster.ClusterId
|
||||
break
|
||||
}
|
||||
|
||||
//describe release
|
||||
describeRls, err := rlsOperator.DescribeApplication(testWorkspace, "", "default", rlsId)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to describe release, error: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
_ = describeRls
|
||||
|
||||
//delete release
|
||||
err = rlsOperator.DeleteApplication(testWorkspace, "", "default", rlsId)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete release, error: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
@@ -1,117 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakek8s "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
)
|
||||
|
||||
func TestOpenPitrixRepo(t *testing.T) {
|
||||
repoOperator := prepareRepoOperator()
|
||||
|
||||
repo := v1alpha1.HelmRepo{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmRepoIdPrefix),
|
||||
|
||||
Labels: map[string]string{
|
||||
constants.WorkspaceLabelKey: testWorkspace,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.HelmRepoSpec{
|
||||
Name: "test-repo",
|
||||
Url: "https://charts.kubesphere.io/main",
|
||||
SyncPeriod: 0,
|
||||
},
|
||||
}
|
||||
|
||||
// validate repo
|
||||
validateRes, err := repoOperator.ValidateRepo(repo.Spec.Url, &repo.Spec.Credential)
|
||||
if err != nil || validateRes.Ok == false {
|
||||
klog.Errorf("validate category failed, error: %s", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// validate the corrupt repo
|
||||
_, err = repoOperator.ValidateRepo("http://www.baidu.com", &repo.Spec.Credential)
|
||||
if err == nil {
|
||||
klog.Errorf("validate category failed")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// create repo
|
||||
repoResp, err := repoOperator.CreateRepo(&repo)
|
||||
if err != nil {
|
||||
klog.Errorf("create category failed")
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// add category to indexer
|
||||
repos, _ := ksClient.ApplicationV1alpha1().HelmRepos().List(context.TODO(), metav1.ListOptions{})
|
||||
for _, repo := range repos.Items {
|
||||
err := fakeInformerFactory.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().
|
||||
Informer().GetIndexer().Add(&repo)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to add repo to indexer")
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// list repo
|
||||
cond := ¶ms.Conditions{Match: map[string]string{WorkspaceLabel: testWorkspace}}
|
||||
repoList, err := repoOperator.ListRepos(cond, "", false, 10, 0)
|
||||
if err != nil {
|
||||
klog.Errorf("list repo failed, err: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
if len(repoList.Items) != 1 {
|
||||
klog.Errorf("list repo failed")
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// describe repo
|
||||
describeRepo, err := repoOperator.DescribeRepo(repoResp.RepoID)
|
||||
if err != nil {
|
||||
klog.Errorf("describe app failed, err: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
_ = describeRepo
|
||||
|
||||
}
|
||||
|
||||
func prepareRepoOperator() RepoInterface {
|
||||
ksClient = fakeks.NewSimpleClientset()
|
||||
k8sClient = fakek8s.NewSimpleClientset()
|
||||
fakeInformerFactory = informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil)
|
||||
|
||||
return newRepoOperator(reposcache.NewReposCache(), fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient)
|
||||
}
|
||||
@@ -1,358 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
typed_v1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
listers_v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/application/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/models"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/utils/reposcache"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
const DescriptionLen = 512
|
||||
|
||||
type RepoInterface interface {
|
||||
CreateRepo(repo *v1alpha1.HelmRepo) (*CreateRepoResponse, error)
|
||||
DeleteRepo(id string) error
|
||||
ValidateRepo(u string, request *v1alpha1.HelmRepoCredential) (*ValidateRepoResponse, error)
|
||||
ModifyRepo(id string, request *ModifyRepoRequest) error
|
||||
DescribeRepo(id string) (*Repo, error)
|
||||
ListRepos(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error)
|
||||
DoRepoAction(repoId string, request *RepoActionRequest) error
|
||||
ListRepoEvents(repoId string, conditions *params.Conditions, limit, offset int) (*models.PageableResponse, error)
|
||||
}
|
||||
|
||||
type repoOperator struct {
|
||||
cachedRepos reposcache.ReposCache
|
||||
informers externalversions.SharedInformerFactory
|
||||
repoClient typed_v1alpha1.ApplicationV1alpha1Interface
|
||||
repoLister listers_v1alpha1.HelmRepoLister
|
||||
rlsLister listers_v1alpha1.HelmReleaseLister
|
||||
}
|
||||
|
||||
func newRepoOperator(cachedRepos reposcache.ReposCache, informers externalversions.SharedInformerFactory, ksClient versioned.Interface) RepoInterface {
|
||||
return &repoOperator{
|
||||
cachedRepos: cachedRepos,
|
||||
informers: informers,
|
||||
repoClient: ksClient.ApplicationV1alpha1(),
|
||||
repoLister: informers.Application().V1alpha1().HelmRepos().Lister(),
|
||||
rlsLister: informers.Application().V1alpha1().HelmReleases().Lister(),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO implement DoRepoAction
|
||||
func (c *repoOperator) DoRepoAction(repoId string, request *RepoActionRequest) error {
|
||||
repo, err := c.repoLister.Get(repoId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if request.Workspace != repo.GetWorkspace() {
|
||||
return nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(repo)
|
||||
copyRepo := repo.DeepCopy()
|
||||
copyRepo.Spec.Version += 1
|
||||
data, err := patch.Data(copyRepo)
|
||||
if err != nil {
|
||||
klog.Errorf("create patch [%s] failed, error: %s", repoId, err)
|
||||
return err
|
||||
}
|
||||
_, err = c.repoClient.HelmRepos().Patch(context.TODO(), repoId, types.MergePatchType, data, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("patch repo [%s] failed, error: %s", repoId, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) ValidateRepo(u string, cred *v1alpha1.HelmRepoCredential) (*ValidateRepoResponse, error) {
|
||||
_, err := helmrepoindex.LoadRepoIndex(context.TODO(), u, cred)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ValidateRepoResponse{Ok: true}, nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) CreateRepo(repo *v1alpha1.HelmRepo) (*CreateRepoResponse, error) {
|
||||
name := repo.GetTrueName()
|
||||
|
||||
items, err := c.repoLister.List(labels.SelectorFromSet(map[string]string{constants.WorkspaceLabelKey: repo.GetWorkspace()}))
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list helm repo failed: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, exists := range items {
|
||||
if exists.GetTrueName() == name {
|
||||
klog.Error(repoItemExists, "name: ", name)
|
||||
return nil, repoItemExists
|
||||
}
|
||||
}
|
||||
|
||||
repo.Spec.Description = stringutils.ShortenString(repo.Spec.Description, DescriptionLen)
|
||||
_, err = c.repoClient.HelmRepos().Create(context.TODO(), repo, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("create helm repo failed, repo_id: %s, error: %s", repo.GetHelmRepoId(), err)
|
||||
return nil, err
|
||||
} else {
|
||||
klog.V(4).Infof("create helm repo success, repo_id: %s", repo.GetHelmRepoId())
|
||||
}
|
||||
|
||||
return &CreateRepoResponse{repo.GetHelmRepoId()}, nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) DeleteRepo(id string) error {
|
||||
err := c.repoClient.HelmRepos().Delete(context.TODO(), id, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("delete repo %s failed, error: %s", id, err)
|
||||
return err
|
||||
}
|
||||
klog.V(4).Infof("repo %s deleted", id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) ModifyRepo(id string, request *ModifyRepoRequest) error {
|
||||
repo, err := c.repoClient.HelmRepos().Get(context.TODO(), id, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Error("get repo failed", err)
|
||||
return err
|
||||
}
|
||||
|
||||
repoCopy := repo.DeepCopy()
|
||||
if request.Description != nil {
|
||||
repoCopy.Spec.Description = stringutils.ShortenString(*request.Description, DescriptionLen)
|
||||
}
|
||||
|
||||
if repoCopy.Annotations == nil {
|
||||
repoCopy.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
if request.SyncPeriod != nil {
|
||||
syncPeriod := 0
|
||||
if *request.SyncPeriod == "" {
|
||||
// disable auto sync
|
||||
syncPeriod = 0
|
||||
} else {
|
||||
if duration, err := time.ParseDuration(*request.SyncPeriod); err != nil {
|
||||
return err
|
||||
} else {
|
||||
syncPeriod = int(duration / time.Second)
|
||||
}
|
||||
}
|
||||
if syncPeriod == 0 {
|
||||
// disable auto sync
|
||||
repoCopy.Spec.SyncPeriod = 0
|
||||
delete(repoCopy.Annotations, v1alpha1.RepoSyncPeriod)
|
||||
} else {
|
||||
repoCopy.Spec.SyncPeriod = syncPeriod
|
||||
repoCopy.Annotations[v1alpha1.RepoSyncPeriod] = *request.SyncPeriod
|
||||
}
|
||||
}
|
||||
|
||||
// modify name of the repo
|
||||
if request.Name != nil && len(*request.Name) > 0 && *request.Name != repoCopy.Spec.Name {
|
||||
items, err := c.repoLister.List(labels.SelectorFromSet(map[string]string{constants.WorkspaceLabelKey: repo.GetWorkspace()}))
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list helm repo failed: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, exists := range items {
|
||||
if exists.GetTrueName() == *request.Name {
|
||||
klog.Error(repoItemExists, "name: ", *request.Name)
|
||||
return repoItemExists
|
||||
}
|
||||
}
|
||||
repoCopy.Spec.Name = *request.Name
|
||||
}
|
||||
|
||||
// modify url or credential
|
||||
if request.URL != nil && len(*request.URL) > 0 {
|
||||
parsedUrl, err := url.Parse(*request.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
userInfo := parsedUrl.User
|
||||
// trim the credential from url
|
||||
parsedUrl.User = nil
|
||||
cred := &v1alpha1.HelmRepoCredential{}
|
||||
if strings.HasPrefix(*request.URL, "https://") || strings.HasPrefix(*request.URL, "http://") {
|
||||
if userInfo != nil {
|
||||
cred.Password, _ = userInfo.Password()
|
||||
cred.Username = userInfo.Username()
|
||||
}
|
||||
} else if strings.HasPrefix(*request.URL, "s3://") {
|
||||
cfg := v1alpha1.S3Config{}
|
||||
err := json.Unmarshal([]byte(*request.Credential), &cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cred.S3Config = cfg
|
||||
}
|
||||
|
||||
repoCopy.Spec.Credential = *cred
|
||||
repoCopy.Spec.Url = parsedUrl.String()
|
||||
|
||||
// validate repo
|
||||
_, err = c.ValidateRepo(repoCopy.Spec.Url, &repo.Spec.Credential)
|
||||
if err != nil {
|
||||
klog.Errorf("validate repo failed, err: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// change repo name and description won't change version
|
||||
repoCopy.Spec.Version += 1
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(repo)
|
||||
data, err := patch.Data(repoCopy)
|
||||
if err != nil {
|
||||
klog.Error("create patch failed", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// data == "{}", need not to patch
|
||||
if len(data) == 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = c.repoClient.HelmRepos().Patch(context.TODO(), id, patch.Type(), data, metav1.PatchOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) DescribeRepo(id string) (*Repo, error) {
|
||||
repo, err := c.repoClient.HelmRepos().Get(context.TODO(), id, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
retRepo := convertRepo(repo)
|
||||
return retRepo, nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) ListRepos(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) {
|
||||
|
||||
ls := labels.NewSelector()
|
||||
r, _ := labels.NewRequirement(constants.WorkspaceLabelKey, selection.Equals, []string{conditions.Match[WorkspaceLabel]})
|
||||
ls = ls.Add([]labels.Requirement{*r}...)
|
||||
|
||||
repos, err := c.repoLister.List(ls)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
if conditions.Match[Keyword] != "" {
|
||||
repos = helmRepoFilter(conditions.Match[Keyword], repos)
|
||||
}
|
||||
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(HelmRepoList(repos)))
|
||||
} else {
|
||||
sort.Sort(HelmRepoList(repos))
|
||||
}
|
||||
|
||||
totalCount := len(repos)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
repos = repos[start:end]
|
||||
items := make([]interface{}, 0, len(repos))
|
||||
for i := range repos {
|
||||
items = append(items, convertRepo(repos[i]))
|
||||
}
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
}
|
||||
|
||||
func helmRepoFilter(namePrefix string, list []*v1alpha1.HelmRepo) (res []*v1alpha1.HelmRepo) {
|
||||
lowerPrefix := strings.ToLower(namePrefix)
|
||||
for _, repo := range list {
|
||||
name := repo.GetTrueName()
|
||||
if strings.Contains(strings.ToLower(name), lowerPrefix) {
|
||||
res = append(res, repo)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type HelmRepoList []*v1alpha1.HelmRepo
|
||||
|
||||
func (l HelmRepoList) Len() int { return len(l) }
|
||||
func (l HelmRepoList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l HelmRepoList) Less(i, j int) bool {
|
||||
t1 := l[i].CreationTimestamp.UnixNano()
|
||||
t2 := l[j].CreationTimestamp.UnixNano()
|
||||
if t1 < t2 {
|
||||
return true
|
||||
} else if t1 > t2 {
|
||||
return false
|
||||
} else {
|
||||
n1 := l[i].GetTrueName()
|
||||
n2 := l[j].GetTrueName()
|
||||
return n1 < n2
|
||||
}
|
||||
}
|
||||
|
||||
func (c *repoOperator) ListRepoEvents(repoId string, conditions *params.Conditions, limit, offset int) (*models.PageableResponse, error) {
|
||||
|
||||
repo, err := c.repoClient.HelmRepos().Get(context.TODO(), repoId, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
states := repo.Status.SyncState
|
||||
totalCount := len(states)
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
states = states[start:end]
|
||||
items := make([]interface{}, 0, len(states))
|
||||
for i := range states {
|
||||
items = append(items, convertRepoEvent(&repo.ObjectMeta, &states[i]))
|
||||
}
|
||||
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
}
|
||||
@@ -1,929 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/strfmt"
|
||||
)
|
||||
|
||||
type ModifyAppRequest struct {
|
||||
// content of attachment
|
||||
AttachmentContent []byte `json:"attachment_content,omitempty"`
|
||||
|
||||
// only for screenshot, range: [0, 5]
|
||||
Sequence *int32 `json:"sequence,omitempty"`
|
||||
|
||||
// optional: icon/screenshot
|
||||
Type *string `json:"type,omitempty"`
|
||||
|
||||
// abstraction of app
|
||||
Abstraction *string `json:"abstraction,omitempty"`
|
||||
|
||||
// category id of the app
|
||||
CategoryID *string `json:"category_id,omitempty"`
|
||||
|
||||
// description of the app
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
// home page of the app
|
||||
Home *string `json:"home,omitempty"`
|
||||
|
||||
// key words of the app
|
||||
Keywords *string `json:"keywords,omitempty"`
|
||||
|
||||
// maintainers who maintainer the app
|
||||
Maintainers *string `json:"maintainers,omitempty"`
|
||||
|
||||
// name of the app
|
||||
Name *string `json:"name,omitempty"`
|
||||
|
||||
// instructions of the app
|
||||
Readme *string `json:"readme,omitempty"`
|
||||
|
||||
// sources of app
|
||||
Sources *string `json:"sources,omitempty"`
|
||||
|
||||
// tos of app
|
||||
Tos *string `json:"tos,omitempty"`
|
||||
}
|
||||
|
||||
type ModifyAppVersionRequest struct {
|
||||
// app description
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
// app name
|
||||
Name *string `json:"name,omitempty"`
|
||||
|
||||
// package of app to replace other
|
||||
Package []byte `json:"package,omitempty"`
|
||||
|
||||
// filename map to file_content
|
||||
PackageFiles map[string][]byte `json:"package_files,omitempty"`
|
||||
|
||||
// required, version id of app to modify
|
||||
VersionID *string `json:"version_id,omitempty"`
|
||||
}
|
||||
type AppVersionAudit struct {
|
||||
|
||||
// id of specific version app
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// name of specific version app
|
||||
AppName string `json:"app_name,omitempty"`
|
||||
|
||||
// audit message
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// user of auditer
|
||||
Operator string `json:"operator,omitempty"`
|
||||
|
||||
// operator of auditer eg.[global_admin|developer|business|technical|isv]
|
||||
OperatorType string `json:"operator_type,omitempty"`
|
||||
|
||||
// review id
|
||||
ReviewId string `json:"review_id,omitempty"`
|
||||
|
||||
// audit status, eg.[draft|submitted|passed|rejected|active|in-review|deleted|suspended]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// id of version to audit
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
|
||||
// version name
|
||||
VersionName string `json:"version_name,omitempty"`
|
||||
|
||||
// version type
|
||||
VersionType string `json:"version_type,omitempty"`
|
||||
}
|
||||
|
||||
type ValidatePackageRequest struct {
|
||||
|
||||
// required, version package eg.[the wordpress-0.0.1.tgz will be encoded to bytes]
|
||||
VersionPackage strfmt.Base64 `json:"version_package,omitempty"`
|
||||
|
||||
// optional, vmbased/helm
|
||||
VersionType string `json:"version_type,omitempty"`
|
||||
}
|
||||
type App struct {
|
||||
|
||||
// abstraction of app
|
||||
Abstraction string `json:"abstraction,omitempty"`
|
||||
|
||||
// whether there is a released version in the app
|
||||
Active bool `json:"active,omitempty"`
|
||||
|
||||
// app id
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// app version types eg.[vmbased|helm]
|
||||
AppVersionTypes string `json:"app_version_types,omitempty"`
|
||||
|
||||
// category set
|
||||
CategorySet AppCategorySet `json:"category_set"`
|
||||
|
||||
// chart name of app
|
||||
ChartName string `json:"chart_name,omitempty"`
|
||||
|
||||
// company join time
|
||||
CompanyJoinTime *strfmt.DateTime `json:"company_join_time,omitempty"`
|
||||
|
||||
// company name
|
||||
CompanyName string `json:"company_name,omitempty"`
|
||||
|
||||
// company profile
|
||||
CompanyProfile string `json:"company_profile,omitempty"`
|
||||
|
||||
// company website
|
||||
CompanyWebsite string `json:"company_website,omitempty"`
|
||||
|
||||
// the time when app create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// app description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// app home page
|
||||
Home string `json:"home,omitempty"`
|
||||
|
||||
// app icon
|
||||
Icon string `json:"icon,omitempty"`
|
||||
|
||||
// the isv user who create the app
|
||||
Isv string `json:"isv,omitempty"`
|
||||
|
||||
// app key words
|
||||
Keywords string `json:"keywords,omitempty"`
|
||||
|
||||
// latest version of app
|
||||
LatestAppVersion *AppVersion `json:"latest_app_version,omitempty"`
|
||||
|
||||
// app maintainers
|
||||
Maintainers string `json:"maintainers,omitempty"`
|
||||
|
||||
// app name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// owner of app
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
// app instructions
|
||||
Readme string `json:"readme,omitempty"`
|
||||
|
||||
// repository(store app package) id
|
||||
RepoId string `json:"repo_id,omitempty"`
|
||||
|
||||
// app screenshots
|
||||
Screenshots string `json:"screenshots,omitempty"`
|
||||
|
||||
// sources of app
|
||||
Sources string `json:"sources,omitempty"`
|
||||
|
||||
// status eg.[modify|submit|review|cancel|release|delete|pass|reject|suspend|recover]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// tos of app
|
||||
Tos string `json:"tos,omitempty"`
|
||||
|
||||
// the time when app update
|
||||
UpdateTime *strfmt.DateTime `json:"update_time,omitempty"`
|
||||
|
||||
ClusterTotal *int `json:"cluster_total,omitempty"`
|
||||
}
|
||||
type AppVersionReviewPhase struct {
|
||||
|
||||
// review message
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// user of reviewer
|
||||
Operator string `json:"operator,omitempty"`
|
||||
|
||||
// operator type of reviewer eg.[global_admin|developer|business|technical|isv]
|
||||
OperatorType string `json:"operator_type,omitempty"`
|
||||
|
||||
// app version review time
|
||||
ReviewTime *strfmt.DateTime `json:"review_time,omitempty"`
|
||||
|
||||
// review status of app version eg.[isv-in-review|isv-passed|isv-rejected|isv-draft|business-in-review|business-passed|business-rejected|develop-draft|develop-in-review|develop-passed|develop-rejected|develop-draft]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
}
|
||||
|
||||
type AppVersionReviewPhaseOAIGen map[string]AppVersionReviewPhase
|
||||
|
||||
type GetAppVersionPackageResponse struct {
|
||||
|
||||
// app id of package
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// package of specific app version
|
||||
Package strfmt.Base64 `json:"package,omitempty"`
|
||||
|
||||
// version id of package
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
}
|
||||
|
||||
type GetAppVersionPackageFilesResponse struct {
|
||||
|
||||
// filename map to content
|
||||
Files map[string]strfmt.Base64 `json:"files,omitempty"`
|
||||
|
||||
// version id
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
}
|
||||
type AppVersionReview struct {
|
||||
|
||||
// app id
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// app name
|
||||
AppName string `json:"app_name,omitempty"`
|
||||
|
||||
// phase
|
||||
Phase AppVersionReviewPhaseOAIGen `json:"phase,omitempty"`
|
||||
|
||||
// review id
|
||||
ReviewId string `json:"review_id,omitempty"`
|
||||
|
||||
// user who review the app version
|
||||
Reviewer string `json:"reviewer,omitempty"`
|
||||
|
||||
// review status eg.[isv-in-review|isv-passed|isv-rejected|isv-draft|business-in-review|business-passed|business-rejected|develop-draft|develop-in-review|develop-passed|develop-rejected|develop-draft]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// id of app version
|
||||
VersionID string `json:"version_id,omitempty"`
|
||||
|
||||
// version name of specific app version
|
||||
VersionName string `json:"version_name,omitempty"`
|
||||
|
||||
// version type
|
||||
VersionType string `json:"version_type,omitempty"`
|
||||
|
||||
// Workspace of the app version
|
||||
Workspace string `json:"workspace,omitempty"`
|
||||
}
|
||||
|
||||
type CreateAppRequest struct {
|
||||
|
||||
// app icon
|
||||
Icon string `json:"icon,omitempty"`
|
||||
|
||||
// isv
|
||||
Isv string `json:"isv,omitempty"`
|
||||
|
||||
// required, app name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// required, version name of the app
|
||||
VersionName string `json:"version_name,omitempty"`
|
||||
|
||||
// required, version with specific app package
|
||||
VersionPackage strfmt.Base64 `json:"version_package,omitempty"`
|
||||
|
||||
// optional, vmbased/helm
|
||||
VersionType string `json:"version_type,omitempty"`
|
||||
|
||||
Username string `json:"-"`
|
||||
}
|
||||
|
||||
type CreateAppResponse struct {
|
||||
|
||||
// app id
|
||||
AppID string `json:"app_id,omitempty"`
|
||||
|
||||
// version id of the app
|
||||
VersionID string `json:"version_id,omitempty"`
|
||||
}
|
||||
|
||||
type AppVersion struct {
|
||||
|
||||
// active or not
|
||||
Active bool `json:"active,omitempty"`
|
||||
|
||||
// app id
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// the time when app version create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// description of app of specific version
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// home of app of specific version
|
||||
Home string `json:"home,omitempty"`
|
||||
|
||||
// icon of app of specific version
|
||||
Icon string `json:"icon,omitempty"`
|
||||
|
||||
// keywords of app of specific version
|
||||
Keywords string `json:"keywords,omitempty"`
|
||||
|
||||
// maintainers of app of specific version
|
||||
Maintainers string `json:"maintainers,omitempty"`
|
||||
|
||||
// message path of app of specific version
|
||||
Message string `json:"message,omitempty"`
|
||||
|
||||
// version name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// owner
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
// package name of app of specific version
|
||||
PackageName string `json:"package_name,omitempty"`
|
||||
|
||||
// readme of app of specific version
|
||||
Readme string `json:"readme,omitempty"`
|
||||
|
||||
// review id of app of specific version
|
||||
ReviewId string `json:"review_id,omitempty"`
|
||||
|
||||
// screenshots of app of specific version
|
||||
Screenshots string `json:"screenshots,omitempty"`
|
||||
|
||||
// sequence of app of specific version
|
||||
Sequence int64 `json:"sequence,omitempty"`
|
||||
|
||||
// sources of app of specific version
|
||||
Sources string `json:"sources,omitempty"`
|
||||
|
||||
// status of app of specific version eg.[draft|submitted|passed|rejected|active|in-review|deleted|suspended]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// type of app of specific version
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// the time when app version update
|
||||
UpdateTime *strfmt.DateTime `json:"update_time,omitempty"`
|
||||
|
||||
// version id of app
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
|
||||
ClusterTotal *int `json:"cluster_total,omitempty"`
|
||||
}
|
||||
|
||||
type CreateAppVersionResponse struct {
|
||||
|
||||
// version id
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
}
|
||||
|
||||
type ValidatePackageResponse struct {
|
||||
|
||||
// app description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// error eg.[json error]
|
||||
Error string `json:"error,omitempty"`
|
||||
|
||||
// filename map to detail
|
||||
ErrorDetails map[string]string `json:"error_details,omitempty"`
|
||||
|
||||
// app name eg.[wordpress|mysql|...]
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// app url
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
// app version name.eg.[0.1.0]
|
||||
VersionName string `json:"version_name,omitempty"`
|
||||
|
||||
Icon string `json:"icon,omitempty"`
|
||||
}
|
||||
|
||||
type CreateAppVersionRequest struct {
|
||||
|
||||
// required, id of app to create new version
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// description of app of specific version
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// required, version name eg.[0.1.0|0.1.3|...]
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// package of app of specific version
|
||||
Package strfmt.Base64 `json:"package,omitempty"`
|
||||
|
||||
// optional: vmbased/helm
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
Username string `json:"-"`
|
||||
}
|
||||
|
||||
type GetAppVersionFilesRequest struct {
|
||||
Files []string `json:"files,omitempty"`
|
||||
}
|
||||
|
||||
type ActionRequest struct {
|
||||
Action string `json:"action"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Username string `json:"-"`
|
||||
}
|
||||
|
||||
type Attachment struct {
|
||||
|
||||
// filename map to content
|
||||
AttachmentContent map[string]strfmt.Base64 `json:"attachment_content,omitempty"`
|
||||
|
||||
// attachment id
|
||||
AttachmentID string `json:"attachment_id,omitempty"`
|
||||
|
||||
// the time attachment create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
}
|
||||
|
||||
type CreateCategoryRequest struct {
|
||||
|
||||
// category description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// category icon
|
||||
Icon strfmt.Base64 `json:"icon,omitempty"`
|
||||
|
||||
// the i18n of this category, json format, sample: {"zh_cn": "数据库", "en": "database"}
|
||||
Locale string `json:"locale,omitempty"`
|
||||
|
||||
// required, category name
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
type ModifyCategoryRequest struct {
|
||||
// category description
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
// category icon
|
||||
Icon []byte `json:"icon,omitempty"`
|
||||
|
||||
// the i18n of this category, json format, sample: {"zh_cn": "数据库", "en": "database"}
|
||||
Locale *string `json:"locale,omitempty"`
|
||||
|
||||
// category name
|
||||
Name *string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
type AppCategorySet []*ResourceCategory
|
||||
|
||||
type ResourceCategory struct {
|
||||
|
||||
// category id
|
||||
CategoryId string `json:"category_id,omitempty"`
|
||||
|
||||
// create time
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// locale
|
||||
Locale string `json:"locale,omitempty"`
|
||||
|
||||
// name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// status
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// status time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
}
|
||||
|
||||
type Category struct {
|
||||
|
||||
// category id
|
||||
CategoryID string `json:"category_id,omitempty"`
|
||||
|
||||
// the time when category create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// category description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// category icon
|
||||
Icon string `json:"icon,omitempty"`
|
||||
|
||||
// the i18n of this category, json format, sample: {"zh_cn": "数据库", "en": "database"}
|
||||
Locale string `json:"locale,omitempty"`
|
||||
|
||||
// category name,app belong to a category,eg.[AI|Firewall|cache|...]
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// owner
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
AppTotal *int `json:"app_total,omitempty"`
|
||||
|
||||
// the time when category update
|
||||
UpdateTime *strfmt.DateTime `json:"update_time,omitempty"`
|
||||
}
|
||||
|
||||
type CreateCategoryResponse struct {
|
||||
|
||||
// id of category created
|
||||
CategoryId string `json:"category_id,omitempty"`
|
||||
}
|
||||
|
||||
type RepoEvent struct {
|
||||
// repository event create time
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// owner
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
// repository event id
|
||||
RepoEventId string `json:"repo_event_id,omitempty"`
|
||||
|
||||
// repository id
|
||||
RepoId string `json:"repo_id,omitempty"`
|
||||
|
||||
// result
|
||||
Result string `json:"result,omitempty"`
|
||||
|
||||
// repository event status eg.[failed|successful|working|pending]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
}
|
||||
|
||||
type CreateRepoRequest struct {
|
||||
// required app default status.eg:[draft|active]
|
||||
AppDefaultStatus string `json:"app_default_status,omitempty"`
|
||||
|
||||
// category id
|
||||
CategoryId string `json:"category_id,omitempty"`
|
||||
|
||||
// required, credential of visiting the repository
|
||||
Credential string `json:"credential,omitempty"`
|
||||
|
||||
// repository description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// If workspace is empty, then it's a global repo
|
||||
Workspace *string `json:"workspace,omitempty"`
|
||||
|
||||
// required, repository name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// required, runtime provider eg.[qingcloud|aliyun|aws|kubernetes]
|
||||
Providers []string `json:"providers"`
|
||||
|
||||
// min sync period to sync helm repo, a duration string is a sequence of
|
||||
// decimal numbers, each with optional fraction and a unit suffix,
|
||||
// such as "180s", "2h" or "45m".
|
||||
SyncPeriod string `json:"sync_period"`
|
||||
|
||||
// repository type
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// required, url of visiting the repository
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
// required, visibility eg:[public|private]
|
||||
Visibility string `json:"visibility,omitempty"`
|
||||
}
|
||||
|
||||
type RepoCategorySet []*ResourceCategory
|
||||
|
||||
type ModifyRepoRequest struct {
|
||||
// app default status eg:[draft|active]
|
||||
AppDefaultStatus *string `json:"app_default_status,omitempty"`
|
||||
|
||||
// category id
|
||||
CategoryID *string `json:"category_id,omitempty"`
|
||||
|
||||
// credential of visiting the repository
|
||||
Credential *string `json:"credential,omitempty"`
|
||||
|
||||
// repository description
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
Workspace *string `json:"workspace,omitempty"`
|
||||
|
||||
// min sync period to sync helm repo
|
||||
SyncPeriod *string `json:"sync_period"`
|
||||
|
||||
// repository name
|
||||
Name *string `json:"name,omitempty"`
|
||||
|
||||
// runtime provider eg.[qingcloud|aliyun|aws|kubernetes]
|
||||
Providers []string `json:"providers"`
|
||||
|
||||
// repository type
|
||||
Type *string `json:"type,omitempty"`
|
||||
|
||||
// url of visiting the repository
|
||||
URL *string `json:"url,omitempty"`
|
||||
|
||||
// visibility eg:[public|private]
|
||||
Visibility *string `json:"visibility,omitempty"`
|
||||
}
|
||||
|
||||
type RepoActionRequest struct {
|
||||
Action string `json:"action"`
|
||||
Workspace string `json:"workspace"`
|
||||
}
|
||||
|
||||
type ValidateRepoRequest struct {
|
||||
Type string `json:"type"`
|
||||
Credential string `json:"credential"`
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
type RepoSelector struct {
|
||||
// the time when repository selector create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// selector key
|
||||
SelectorKey string `json:"selector_key,omitempty"`
|
||||
|
||||
// selector value
|
||||
SelectorValue string `json:"selector_value,omitempty"`
|
||||
}
|
||||
type RepoLabel struct {
|
||||
// the time when repository label create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// label key
|
||||
LabelKey string `json:"label_key,omitempty"`
|
||||
|
||||
// label value
|
||||
LabelValue string `json:"label_value,omitempty"`
|
||||
}
|
||||
|
||||
type RepoLabels []*RepoLabel
|
||||
type RepoSelectors []*RepoSelector
|
||||
|
||||
type Repo struct {
|
||||
ChartCount int `json:"chart_count,omitempty"`
|
||||
|
||||
// app default status eg[active|draft]
|
||||
AppDefaultStatus string `json:"app_default_status,omitempty"`
|
||||
|
||||
// category set
|
||||
CategorySet RepoCategorySet `json:"category_set"`
|
||||
|
||||
// controller, value 0 for self resource, value 1 for openpitrix resource
|
||||
Controller int32 `json:"controller,omitempty"`
|
||||
|
||||
// the time when repository create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// credential of visiting the repository
|
||||
Credential string `json:"credential,omitempty"`
|
||||
|
||||
// repository description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// labels
|
||||
Labels RepoLabels `json:"labels"`
|
||||
|
||||
// repository name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// creator
|
||||
Creator string `json:"creator,omitempty"`
|
||||
|
||||
// runtime provider eg.[qingcloud|aliyun|aws|kubernetes]
|
||||
Providers []string `json:"providers"`
|
||||
|
||||
// repository id
|
||||
RepoId string `json:"repo_id,omitempty"`
|
||||
|
||||
// selectors
|
||||
Selectors RepoSelectors `json:"selectors"`
|
||||
|
||||
// status eg.[successful|failed|syncing]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// type of repository eg.[http|https|s3]
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// url of visiting the repository
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
// visibility.eg:[public|private]
|
||||
Visibility string `json:"visibility,omitempty"`
|
||||
|
||||
SyncPeriod string `json:"sync_period,omitempty"`
|
||||
}
|
||||
|
||||
type CreateRepoResponse struct {
|
||||
|
||||
// id of repository created
|
||||
RepoID string `json:"repo_id,omitempty"`
|
||||
}
|
||||
|
||||
type ValidateRepoResponse struct {
|
||||
|
||||
// if validate error,return error code
|
||||
ErrorCode int64 `json:"errorCode,omitempty"`
|
||||
|
||||
// validate repository ok or not
|
||||
Ok bool `json:"ok,omitempty"`
|
||||
}
|
||||
|
||||
type CreateClusterRequest struct {
|
||||
|
||||
// release name
|
||||
Name string `json:"name"`
|
||||
|
||||
// release install description
|
||||
Description string `json:"description"`
|
||||
|
||||
// advanced param
|
||||
AdvancedParam []string `json:"advanced_param"`
|
||||
|
||||
// required, id of app to run in cluster
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// required, conf a json string, include cpu, memory info of cluster
|
||||
Conf string `json:"conf,omitempty"`
|
||||
|
||||
// required, id of runtime
|
||||
RuntimeId string `json:"runtime_id,omitempty"`
|
||||
|
||||
// required, id of app version
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
|
||||
Username string `json:"-"`
|
||||
|
||||
// current workspace
|
||||
Workspace string `json:"workspace,omitempty"`
|
||||
}
|
||||
|
||||
type UpgradeClusterRequest struct {
|
||||
// release namespace
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
|
||||
// cluster id
|
||||
ClusterId string `json:"cluster_id"`
|
||||
|
||||
// helm app id
|
||||
AppId string `json:"app_id"`
|
||||
|
||||
// advanced param
|
||||
AdvancedParam []string `json:"advanced_param"`
|
||||
|
||||
// required, conf a json string, include cpu, memory info of cluster
|
||||
Conf string `json:"conf,omitempty"`
|
||||
|
||||
// Deprecated: required, id of runtime
|
||||
RuntimeId string `json:"runtime_id,omitempty"`
|
||||
|
||||
// required, id of app version
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
|
||||
Username string `json:"-"`
|
||||
}
|
||||
|
||||
type Cluster struct {
|
||||
|
||||
// additional info
|
||||
AdditionalInfo string `json:"additional_info,omitempty"`
|
||||
|
||||
// id of app run in cluster
|
||||
AppId string `json:"app_id,omitempty"`
|
||||
|
||||
// cluster id
|
||||
ClusterId string `json:"cluster_id,omitempty"`
|
||||
|
||||
// cluster type, frontgate or normal cluster
|
||||
ClusterType int64 `json:"cluster_type,omitempty"`
|
||||
|
||||
// the time when cluster create
|
||||
CreateTime *strfmt.DateTime `json:"create_time,omitempty"`
|
||||
|
||||
// cluster used to debug or not
|
||||
Debug bool `json:"debug,omitempty"`
|
||||
|
||||
// cluster description
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// endpoint of cluster
|
||||
Endpoints string `json:"endpoints,omitempty"`
|
||||
|
||||
// cluster env
|
||||
Env string `json:"env,omitempty"`
|
||||
|
||||
// frontgate id, a proxy for vpc to communicate
|
||||
FrontgateId string `json:"frontgate_id,omitempty"`
|
||||
|
||||
// global uuid
|
||||
GlobalUUID string `json:"global_uuid,omitempty"`
|
||||
|
||||
// metadata root access
|
||||
MetadataRootAccess bool `json:"metadata_root_access,omitempty"`
|
||||
|
||||
// cluster name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// owner
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
// cluster runtime id
|
||||
RuntimeId string `json:"runtime_id,omitempty"`
|
||||
|
||||
// cluster status eg.[active|used|enabled|disabled|deleted|stopped|ceased]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
StatusTime *strfmt.DateTime `json:"status_time,omitempty"`
|
||||
|
||||
// subnet id, cluster run in a subnet
|
||||
SubnetId string `json:"subnet_id,omitempty"`
|
||||
|
||||
// cluster transition status eg.[creating|deleting|upgrading|updating|rollbacking|stopping|starting|recovering|ceasing|resizing|scaling]
|
||||
TransitionStatus string `json:"transition_status,omitempty"`
|
||||
|
||||
// upgrade status, unused
|
||||
UpgradeStatus string `json:"upgrade_status,omitempty"`
|
||||
|
||||
// cluster upgraded time
|
||||
UpgradeTime *strfmt.DateTime `json:"upgrade_time,omitempty"`
|
||||
|
||||
// id of version of app run in cluster
|
||||
VersionId string `json:"version_id,omitempty"`
|
||||
|
||||
// vpc id, a vpc contain one more subnet
|
||||
VpcId string `json:"vpc_id,omitempty"`
|
||||
|
||||
// zone of cluster eg.[pek3a|pek3b]
|
||||
Zone string `json:"zone,omitempty"`
|
||||
}
|
||||
|
||||
type Runtime struct {
|
||||
// runtime id
|
||||
RuntimeId string `protobuf:"bytes,1,opt,name=runtime_id,json=runtimeId,proto3" json:"runtime_id,omitempty"`
|
||||
// runtime name,create by owner.
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// runtime description
|
||||
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
|
||||
}
|
||||
|
||||
type ModifyClusterAttributesRequest struct {
|
||||
ClusterName string `json:"clusterName,omitempty"`
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
|
||||
// required, id of cluster to modify
|
||||
ClusterID string `json:"cluster_id"`
|
||||
|
||||
// cluster description
|
||||
Description *string `json:"description,omitempty"`
|
||||
|
||||
// cluster name
|
||||
Name *string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
CreateTime = "create_time"
|
||||
StatusTime = "status_time"
|
||||
|
||||
VersionId = "version_id"
|
||||
RepoId = "repo_id"
|
||||
CategoryId = "category_id"
|
||||
Status = "status"
|
||||
Type = "type"
|
||||
Visibility = "visibility"
|
||||
AppId = "app_id"
|
||||
Keyword = "keyword"
|
||||
ISV = "isv"
|
||||
WorkspaceLabel = "workspace"
|
||||
BuiltinRepoId = "repo-helm"
|
||||
StatusActive = "active"
|
||||
StatusSuspended = "suspended"
|
||||
ActionRecover = "recover"
|
||||
ActionSuspend = "suspend"
|
||||
ActionCancel = "cancel"
|
||||
ActionPass = "pass"
|
||||
ActionReject = "reject"
|
||||
ActionSubmit = "submit"
|
||||
ActionRelease = "release"
|
||||
Ascending = "ascending"
|
||||
ActionIndex = "index"
|
||||
)
|
||||
@@ -1,839 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openpitrix
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/go-openapi/strfmt"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/cli-runtime/pkg/resource"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/server/params"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
"kubesphere.io/kubesphere/pkg/utils/sliceutil"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
func convertRepoEvent(meta *metav1.ObjectMeta, state *v1alpha1.HelmRepoSyncState) *RepoEvent {
|
||||
if meta == nil || state == nil {
|
||||
return nil
|
||||
}
|
||||
out := RepoEvent{}
|
||||
date := strfmt.DateTime(time.Unix(state.SyncTime.Unix(), 0))
|
||||
out.CreateTime = &date
|
||||
out.RepoId = meta.Name
|
||||
out.RepoEventId = ""
|
||||
out.Result = state.Message
|
||||
out.Status = state.State
|
||||
out.StatusTime = out.CreateTime
|
||||
|
||||
return &out
|
||||
}
|
||||
|
||||
func convertAppVersionAudit(appVersion *v1alpha1.HelmApplicationVersion) []*AppVersionAudit {
|
||||
if appVersion == nil {
|
||||
return nil
|
||||
}
|
||||
var audits []*AppVersionAudit
|
||||
for _, helmAudit := range appVersion.Status.Audit {
|
||||
var audit AppVersionAudit
|
||||
audit.AppId = appVersion.GetHelmApplicationId()
|
||||
audit.Operator = helmAudit.Operator
|
||||
audit.Message = helmAudit.Message
|
||||
audit.Status = helmAudit.State
|
||||
date := strfmt.DateTime(time.Unix(helmAudit.Time.Unix(), 0))
|
||||
audit.StatusTime = &date
|
||||
audit.VersionId = appVersion.Name
|
||||
audit.VersionType = "helm"
|
||||
audit.VersionName = appVersion.GetVersionName()
|
||||
audit.Operator = helmAudit.Operator
|
||||
audit.OperatorType = helmAudit.OperatorType
|
||||
|
||||
audits = append(audits, &audit)
|
||||
}
|
||||
|
||||
return audits
|
||||
}
|
||||
|
||||
type HelmReleaseList []*v1alpha1.HelmRelease
|
||||
|
||||
func (l HelmReleaseList) Len() int { return len(l) }
|
||||
func (l HelmReleaseList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l HelmReleaseList) Less(i, j int) bool {
|
||||
var t1, t2 time.Time
|
||||
if l[i].Status.LastDeployed == nil {
|
||||
t1 = l[i].CreationTimestamp.Time
|
||||
} else {
|
||||
t1 = l[i].Status.LastDeployed.Time
|
||||
}
|
||||
|
||||
if l[j].Status.LastDeployed == nil {
|
||||
t2 = l[j].CreationTimestamp.Time
|
||||
} else {
|
||||
t2 = l[j].Status.LastDeployed.Time
|
||||
}
|
||||
|
||||
if t1.After(t2) {
|
||||
return true
|
||||
} else if t1.Before(t2) {
|
||||
return false
|
||||
} else {
|
||||
return l[i].Name > l[j].Name
|
||||
}
|
||||
}
|
||||
|
||||
type AppVersionAuditList []*AppVersionAudit
|
||||
|
||||
func (l AppVersionAuditList) Len() int { return len(l) }
|
||||
func (l AppVersionAuditList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l AppVersionAuditList) Less(i, j int) bool {
|
||||
t1 := l[i].StatusTime.String()
|
||||
t2 := l[j].StatusTime.String()
|
||||
if t1 > t2 {
|
||||
return true
|
||||
} else if t1 < t2 {
|
||||
return false
|
||||
} else {
|
||||
n1 := l[i].VersionName
|
||||
n2 := l[j].VersionName
|
||||
return n1 < n2
|
||||
}
|
||||
}
|
||||
|
||||
// copy from openpitrix
|
||||
func matchPackageFailedError(err error, res *ValidatePackageResponse) {
|
||||
var errStr = err.Error()
|
||||
var matchedError = ""
|
||||
var errorDetails = make(map[string]string)
|
||||
switch {
|
||||
// Helm errors
|
||||
case strings.HasPrefix(errStr, "no files in chart archive"),
|
||||
strings.HasPrefix(errStr, "no files in app archive"):
|
||||
|
||||
matchedError = "no files in package"
|
||||
|
||||
case strings.HasPrefix(errStr, "chart yaml not in base directory"),
|
||||
strings.HasPrefix(errStr, "chart metadata (Chart.yaml) missing"):
|
||||
|
||||
errorDetails["Chart.yaml"] = "not found"
|
||||
|
||||
case strings.HasPrefix(errStr, "invalid chart (Chart.yaml): name must not be empty"):
|
||||
|
||||
errorDetails["Chart.yaml"] = "package name must not be empty"
|
||||
|
||||
case strings.HasPrefix(errStr, "values.toml is illegal"):
|
||||
|
||||
errorDetails["values.toml"] = errStr
|
||||
|
||||
case strings.HasPrefix(errStr, "error reading"):
|
||||
|
||||
matched := regexp.MustCompile("error reading (.+): (.+)").FindStringSubmatch(errStr)
|
||||
if len(matched) > 0 {
|
||||
errorDetails[matched[1]] = matched[2]
|
||||
}
|
||||
|
||||
// Devkit errors
|
||||
case strings.HasPrefix(errStr, "[package.json] not in base directory"):
|
||||
|
||||
errorDetails["package.json"] = "not found"
|
||||
|
||||
case strings.HasPrefix(errStr, "missing file ["):
|
||||
|
||||
matched := regexp.MustCompile(`missing file \\[(.+)]`).FindStringSubmatch(errStr)
|
||||
if len(matched) > 0 {
|
||||
errorDetails[matched[1]] = "not found"
|
||||
}
|
||||
|
||||
case strings.HasPrefix(errStr, "failed to parse"),
|
||||
strings.HasPrefix(errStr, "failed to render"),
|
||||
strings.HasPrefix(errStr, "failed to load"),
|
||||
strings.HasPrefix(errStr, "failed to decode"):
|
||||
|
||||
matched := regexp.MustCompile("failed to (.+) (.+): (.+)").FindStringSubmatch(errStr)
|
||||
if len(matched) > 0 {
|
||||
errorDetails[matched[2]] = fmt.Sprintf("%s failed, %s", matched[1], matched[3])
|
||||
}
|
||||
|
||||
default:
|
||||
matchedError = errStr
|
||||
}
|
||||
if len(errorDetails) > 0 {
|
||||
res.ErrorDetails = errorDetails
|
||||
}
|
||||
if len(matchedError) > 0 {
|
||||
res.Error = matchedError
|
||||
}
|
||||
}
|
||||
|
||||
func convertCategory(in *v1alpha1.HelmCategory) *Category {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := &Category{}
|
||||
out.Description = in.Spec.Description
|
||||
out.Name = in.Spec.Name
|
||||
out.CategoryID = in.Name
|
||||
t := strfmt.DateTime(in.CreationTimestamp.Time)
|
||||
out.CreateTime = &t
|
||||
if in.Spec.Locale == "" {
|
||||
out.Locale = "{}"
|
||||
} else {
|
||||
out.Locale = in.Spec.Locale
|
||||
}
|
||||
total := in.Status.Total
|
||||
out.AppTotal = &total
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func convertApplication(rls *v1alpha1.HelmRelease, rlsInfos []*resource.Info) *Application {
|
||||
app := &Application{}
|
||||
app.Name = rls.Spec.ChartName
|
||||
cluster := &Cluster{}
|
||||
cluster.ClusterId = rls.Name
|
||||
cluster.Owner = rls.GetCreator()
|
||||
cluster.Zone = rls.GetRlsNamespace()
|
||||
cluster.Status = rls.Status.State
|
||||
cluster.Env = string(rls.Spec.Values)
|
||||
if cluster.Status == "" {
|
||||
cluster.Status = v1alpha1.HelmStatusCreating
|
||||
}
|
||||
cluster.AdditionalInfo = rls.Status.Message
|
||||
cluster.Description = rls.Spec.Description
|
||||
dt := strfmt.DateTime(rls.CreationTimestamp.Time)
|
||||
cluster.CreateTime = &dt
|
||||
if rls.Status.LastDeployed != nil {
|
||||
ut := strfmt.DateTime(rls.Status.LastDeployed.Time)
|
||||
cluster.StatusTime = &ut
|
||||
} else {
|
||||
cluster.StatusTime = &dt
|
||||
}
|
||||
cluster.AppId = rls.Spec.ApplicationId
|
||||
cluster.VersionId = rls.Spec.ApplicationVersionId
|
||||
cluster.Name = rls.GetTrueName()
|
||||
cluster.AdditionalInfo = rls.Status.Message
|
||||
|
||||
if rls.GetRlsCluster() != "" {
|
||||
cluster.RuntimeId = rls.GetRlsCluster()
|
||||
} else {
|
||||
cluster.RuntimeId = "default"
|
||||
}
|
||||
|
||||
app.Cluster = cluster
|
||||
app.Version = &AppVersion{
|
||||
AppId: rls.Spec.ApplicationId,
|
||||
VersionId: rls.Spec.ApplicationVersionId,
|
||||
Name: rls.GetChartVersionName(),
|
||||
}
|
||||
app.App = &App{
|
||||
AppId: rls.Spec.ApplicationId,
|
||||
ChartName: rls.Spec.ChartName,
|
||||
Name: rls.Spec.ChartName,
|
||||
}
|
||||
|
||||
app.ReleaseInfo = make([]runtime.Object, 0, len(rlsInfos))
|
||||
for _, info := range rlsInfos {
|
||||
app.ReleaseInfo = append(app.ReleaseInfo, info.Object)
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func convertApp(app *v1alpha1.HelmApplication, versions []*v1alpha1.HelmApplicationVersion, ctg *v1alpha1.HelmCategory, rlsCount int) *App {
|
||||
if app == nil {
|
||||
return nil
|
||||
}
|
||||
out := &App{}
|
||||
|
||||
out.AppId = app.Name
|
||||
out.Name = app.GetTrueName()
|
||||
|
||||
date := strfmt.DateTime(app.CreationTimestamp.Time)
|
||||
out.CreateTime = &date
|
||||
if app.Status.StatusTime != nil {
|
||||
s := strfmt.DateTime(app.Status.StatusTime.Time)
|
||||
out.StatusTime = &s
|
||||
} else {
|
||||
out.StatusTime = out.CreateTime
|
||||
}
|
||||
|
||||
if app.Status.UpdateTime == nil {
|
||||
out.UpdateTime = out.CreateTime
|
||||
} else {
|
||||
u := strfmt.DateTime(app.Status.UpdateTime.Time)
|
||||
out.UpdateTime = &u
|
||||
}
|
||||
|
||||
out.Status = app.Status.State
|
||||
if out.Status == "" {
|
||||
out.Status = v1alpha1.StateDraft
|
||||
}
|
||||
out.Abstraction = app.Spec.Abstraction
|
||||
out.Description = app.Spec.Description
|
||||
|
||||
if len(app.Spec.Attachments) > 0 {
|
||||
out.Screenshots = strings.Join(app.Spec.Attachments, ",")
|
||||
}
|
||||
out.Home = app.Spec.AppHome
|
||||
out.Icon = app.Spec.Icon
|
||||
|
||||
if ctg != nil {
|
||||
ct := strfmt.DateTime(ctg.CreationTimestamp.Time)
|
||||
rc := ResourceCategory{
|
||||
CategoryId: ctg.Name,
|
||||
Name: ctg.GetTrueName(),
|
||||
CreateTime: &ct,
|
||||
Locale: ctg.Spec.Locale,
|
||||
}
|
||||
if ctg.Spec.Locale == "" {
|
||||
rc.Locale = "{}"
|
||||
} else {
|
||||
rc.Locale = ctg.Spec.Locale
|
||||
}
|
||||
rc.Status = "enabled"
|
||||
|
||||
out.CategorySet = AppCategorySet{&rc}
|
||||
} else {
|
||||
out.CategorySet = AppCategorySet{}
|
||||
}
|
||||
|
||||
for _, version := range versions {
|
||||
if app.Status.LatestVersion == version.GetVersionName() {
|
||||
// find the latest version, and convert its format
|
||||
out.LatestAppVersion = convertAppVersion(version)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if out.LatestAppVersion == nil {
|
||||
out.LatestAppVersion = &AppVersion{}
|
||||
}
|
||||
|
||||
out.AppVersionTypes = "helm"
|
||||
// If this keys exists, the workspace of this app has been deleted, set the isv to empty.
|
||||
if _, exists := app.Annotations[constants.DanglingAppCleanupKey]; !exists {
|
||||
out.Isv = app.GetWorkspace()
|
||||
}
|
||||
|
||||
out.ClusterTotal = &rlsCount
|
||||
out.Owner = app.GetCreator()
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func filterAppVersionByState(versions []*v1alpha1.HelmApplicationVersion, states []string) []*v1alpha1.HelmApplicationVersion {
|
||||
if len(states) == 0 {
|
||||
return versions
|
||||
}
|
||||
|
||||
var j = 0
|
||||
for i := 0; i < len(versions); i++ {
|
||||
state := versions[i].State()
|
||||
if sliceutil.HasString(states, state) {
|
||||
if i != j {
|
||||
versions[j] = versions[i]
|
||||
}
|
||||
j++
|
||||
}
|
||||
}
|
||||
|
||||
versions = versions[:j:j]
|
||||
return versions
|
||||
}
|
||||
|
||||
func convertAppVersion(in *v1alpha1.HelmApplicationVersion) *AppVersion {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := AppVersion{}
|
||||
out.AppId = in.GetHelmApplicationId()
|
||||
out.Active = true
|
||||
t := in.CreationTimestamp.Time
|
||||
date := strfmt.DateTime(t)
|
||||
out.CreateTime = &date
|
||||
if len(in.Status.Audit) > 0 {
|
||||
t = in.Status.Audit[0].Time.Time
|
||||
changeTime := strfmt.DateTime(t)
|
||||
out.StatusTime = &changeTime
|
||||
} else {
|
||||
out.StatusTime = &date
|
||||
}
|
||||
|
||||
// chart create time or update time
|
||||
if in.Spec.Created != nil {
|
||||
updateTime := strfmt.DateTime(in.Spec.Created.Time)
|
||||
out.UpdateTime = &updateTime
|
||||
} else {
|
||||
// Charts in the repo are without this field
|
||||
out.UpdateTime = &date
|
||||
}
|
||||
|
||||
if in.Spec.Metadata != nil {
|
||||
out.Description = in.Spec.Description
|
||||
out.Icon = in.Spec.Icon
|
||||
out.Home = in.Spec.Home
|
||||
}
|
||||
|
||||
// The field Maintainers and Sources were a string field, so I encode the helm field's maintainers and sources,
|
||||
// which are array, to string.
|
||||
if len(in.Spec.Maintainers) > 0 {
|
||||
maintainers, _ := json.Marshal(in.Spec.Maintainers)
|
||||
out.Maintainers = string(maintainers)
|
||||
}
|
||||
|
||||
if len(in.Spec.Sources) > 0 {
|
||||
source, _ := json.Marshal(in.Spec.Sources)
|
||||
out.Sources = string(source)
|
||||
}
|
||||
|
||||
out.Status = in.State()
|
||||
out.Owner = in.GetCreator()
|
||||
out.Name = in.GetVersionName()
|
||||
out.PackageName = fmt.Sprintf("%s-%s.tgz", in.GetTrueName(), in.GetChartVersion())
|
||||
out.VersionId = in.GetHelmApplicationVersionId()
|
||||
return &out
|
||||
}
|
||||
|
||||
func convertRepo(in *v1alpha1.HelmRepo) *Repo {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := Repo{}
|
||||
|
||||
out.RepoId = in.GetHelmRepoId()
|
||||
out.Name = in.GetTrueName()
|
||||
|
||||
out.Status = in.Status.State
|
||||
// set default status `syncing` when helmrepo not reconcile yet
|
||||
if out.Status == "" {
|
||||
out.Status = v1alpha1.RepoStateSyncing
|
||||
}
|
||||
date := strfmt.DateTime(time.Unix(in.CreationTimestamp.Unix(), 0))
|
||||
out.CreateTime = &date
|
||||
|
||||
out.Description = in.Spec.Description
|
||||
out.Creator = in.GetCreator()
|
||||
|
||||
cred, _ := json.Marshal(in.Spec.Credential)
|
||||
out.Credential = string(cred)
|
||||
out.SyncPeriod = in.Annotations[v1alpha1.RepoSyncPeriod]
|
||||
|
||||
out.URL = in.Spec.Url
|
||||
return &out
|
||||
}
|
||||
|
||||
type HelmCategoryList []*v1alpha1.HelmCategory
|
||||
|
||||
func (l HelmCategoryList) Len() int { return len(l) }
|
||||
func (l HelmCategoryList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l HelmCategoryList) Less(i, j int) bool {
|
||||
t1 := l[i].CreationTimestamp.UnixNano()
|
||||
t2 := l[j].CreationTimestamp.UnixNano()
|
||||
if t1 > t2 {
|
||||
return true
|
||||
} else if t1 < t2 {
|
||||
return false
|
||||
} else {
|
||||
n1 := l[i].Spec.Name
|
||||
n2 := l[j].Spec.Name
|
||||
return n1 < n2
|
||||
}
|
||||
}
|
||||
|
||||
type HelmApplicationList []*v1alpha1.HelmApplication
|
||||
|
||||
func (l HelmApplicationList) Len() int { return len(l) }
|
||||
func (l HelmApplicationList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l HelmApplicationList) Less(i, j int) bool {
|
||||
t1 := l[i].CreationTimestamp.UnixNano()
|
||||
t2 := l[j].CreationTimestamp.UnixNano()
|
||||
if t1 < t2 {
|
||||
return true
|
||||
} else if t1 > t2 {
|
||||
return false
|
||||
} else {
|
||||
n1 := l[i].GetTrueName()
|
||||
n2 := l[j].GetTrueName()
|
||||
return n1 < n2
|
||||
}
|
||||
}
|
||||
|
||||
type AppVersionReviews []*v1alpha1.HelmApplicationVersion
|
||||
|
||||
// Len returns the length.
|
||||
func (c AppVersionReviews) Len() int { return len(c) }
|
||||
|
||||
// Swap swaps the position of two items in the versions slice.
|
||||
func (c AppVersionReviews) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
|
||||
|
||||
// Less returns true if the version of entry a is less than the version of entry b.
|
||||
func (c AppVersionReviews) Less(a, b int) bool {
|
||||
aVersion := c[a]
|
||||
bVersion := c[b]
|
||||
|
||||
if len(aVersion.Status.Audit) > 0 && len(bVersion.Status.Audit) > 0 {
|
||||
t1 := aVersion.Status.Audit[0].Time
|
||||
t2 := bVersion.Status.Audit[0].Time
|
||||
if t1.Before(&t2) {
|
||||
return true
|
||||
} else if t2.Before(&t1) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
i, err := semver.NewVersion(aVersion.GetSemver())
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
j, err := semver.NewVersion(bVersion.GetSemver())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if i.Equal(j) {
|
||||
return aVersion.CreationTimestamp.Before(&bVersion.CreationTimestamp)
|
||||
}
|
||||
return j.LessThan(i)
|
||||
}
|
||||
|
||||
type AppVersions []*v1alpha1.HelmApplicationVersion
|
||||
|
||||
// Len returns the length.
|
||||
func (c AppVersions) Len() int { return len(c) }
|
||||
|
||||
// Swap swaps the position of two items in the versions slice.
|
||||
func (c AppVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
|
||||
|
||||
// Less returns true if the version of entry a is less than the version of entry b.
|
||||
func (c AppVersions) Less(a, b int) bool {
|
||||
// Failed parse pushes to the back.
|
||||
aVersion := c[a]
|
||||
bVersion := c[b]
|
||||
i, err := semver.NewVersion(aVersion.GetSemver())
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
j, err := semver.NewVersion(bVersion.GetSemver())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if i.Equal(j) {
|
||||
return aVersion.CreationTimestamp.Before(&bVersion.CreationTimestamp)
|
||||
}
|
||||
return i.LessThan(j)
|
||||
}
|
||||
|
||||
// buildApplicationVersion build an application version
|
||||
// packageData base64 encoded package data
|
||||
func buildApplicationVersion(app *v1alpha1.HelmApplication, chrt helmrepoindex.VersionInterface, chartPackage *string, creator string) *v1alpha1.HelmApplicationVersion {
|
||||
// create helm application version resource
|
||||
t := metav1.Now()
|
||||
ver := &v1alpha1.HelmApplicationVersion{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
constants.CreatorAnnotationKey: creator,
|
||||
},
|
||||
Name: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
|
||||
Labels: map[string]string{
|
||||
constants.ChartApplicationIdLabelKey: app.GetHelmApplicationId(),
|
||||
constants.WorkspaceLabelKey: app.GetWorkspace(),
|
||||
},
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
UID: app.GetUID(),
|
||||
APIVersion: v1alpha1.SchemeGroupVersion.String(),
|
||||
Kind: "HelmApplication",
|
||||
Name: app.Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.HelmApplicationVersionSpec{
|
||||
Metadata: &v1alpha1.Metadata{
|
||||
Version: chrt.GetVersion(),
|
||||
AppVersion: chrt.GetAppVersion(),
|
||||
Name: chrt.GetName(),
|
||||
Icon: chrt.GetIcon(),
|
||||
Home: chrt.GetHome(),
|
||||
Description: stringutils.ShortenString(chrt.GetDescription(), v1alpha1.MsgLen),
|
||||
Sources: chrt.GetRawSources(),
|
||||
Maintainers: chrt.GetRawMaintainers(),
|
||||
},
|
||||
Created: &t,
|
||||
// set data to nil before save app version to etcd
|
||||
Data: []byte(*chartPackage),
|
||||
},
|
||||
Status: v1alpha1.HelmApplicationVersionStatus{
|
||||
State: v1alpha1.StateDraft,
|
||||
Audit: []v1alpha1.Audit{
|
||||
{
|
||||
State: v1alpha1.StateDraft,
|
||||
Time: t,
|
||||
Operator: creator,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return ver
|
||||
}
|
||||
|
||||
func filterAppByName(app *v1alpha1.HelmApplication, namePart string) bool {
|
||||
if len(namePart) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
name := app.GetTrueName()
|
||||
return strings.Contains(strings.ToLower(name), strings.ToLower(namePart))
|
||||
}
|
||||
|
||||
func filterAppByStates(app *v1alpha1.HelmApplication, state []string) bool {
|
||||
if len(state) == 0 {
|
||||
return true
|
||||
}
|
||||
st := app.Status.State
|
||||
// default value is draft
|
||||
if st == "" {
|
||||
st = v1alpha1.StateDraft
|
||||
}
|
||||
if sliceutil.HasString(state, st) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func filterAppReviews(versions []*v1alpha1.HelmApplicationVersion, conditions *params.Conditions) []*v1alpha1.HelmApplicationVersion {
|
||||
if conditions == nil || len(conditions.Match) == 0 || len(versions) == 0 {
|
||||
return versions
|
||||
}
|
||||
|
||||
curr := 0
|
||||
for i := 0; i < len(versions); i++ {
|
||||
if conditions.Match[Keyword] != "" {
|
||||
if !(strings.Contains(strings.ToLower(versions[i].Spec.Name), strings.ToLower(conditions.Match[Keyword]))) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if conditions.Match[Status] != "" {
|
||||
states := strings.Split(conditions.Match[Status], "|")
|
||||
state := versions[i].State()
|
||||
if !sliceutil.HasString(states, state) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if curr != i {
|
||||
versions[curr] = versions[i]
|
||||
}
|
||||
curr++
|
||||
}
|
||||
|
||||
return versions[:curr:curr]
|
||||
}
|
||||
|
||||
func filterAppVersions(versions []*v1alpha1.HelmApplicationVersion, conditions *params.Conditions) []*v1alpha1.HelmApplicationVersion {
|
||||
if conditions == nil || len(conditions.Match) == 0 || len(versions) == 0 {
|
||||
return versions
|
||||
}
|
||||
|
||||
curr := 0
|
||||
for i := 0; i < len(versions); i++ {
|
||||
if conditions.Match[Keyword] != "" {
|
||||
if !(strings.Contains(strings.ToLower(versions[i].Spec.Version), strings.ToLower(conditions.Match[Keyword])) ||
|
||||
strings.Contains(strings.ToLower(versions[i].Spec.AppVersion), strings.ToLower(conditions.Match[Keyword]))) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if conditions.Match[Status] != "" {
|
||||
states := strings.Split(conditions.Match[Status], "|")
|
||||
state := versions[i].State()
|
||||
if !sliceutil.HasString(states, state) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if curr != i {
|
||||
versions[curr] = versions[i]
|
||||
}
|
||||
curr++
|
||||
}
|
||||
|
||||
return versions[:curr:curr]
|
||||
}
|
||||
|
||||
func filterApps(apps []*v1alpha1.HelmApplication, conditions *params.Conditions) []*v1alpha1.HelmApplication {
|
||||
if conditions == nil || len(conditions.Match) == 0 || len(apps) == 0 {
|
||||
return apps
|
||||
}
|
||||
|
||||
// filter app by param app_id
|
||||
appIdMap := make(map[string]string)
|
||||
if len(conditions.Match[AppId]) > 0 {
|
||||
ids := strings.Split(conditions.Match[AppId], "|")
|
||||
for _, id := range ids {
|
||||
if len(id) > 0 {
|
||||
appIdMap[id] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
curr := 0
|
||||
for i := 0; i < len(apps); i++ {
|
||||
if conditions.Match[Keyword] != "" {
|
||||
fv := filterAppByName(apps[i], conditions.Match[Keyword])
|
||||
if !fv {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(appIdMap) > 0 {
|
||||
if _, exists := appIdMap[apps[i].Name]; !exists {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if conditions.Match[Status] != "" {
|
||||
states := strings.Split(conditions.Match[Status], "|")
|
||||
fv := filterAppByStates(apps[i], states)
|
||||
if !fv {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if curr != i {
|
||||
apps[curr] = apps[i]
|
||||
}
|
||||
curr++
|
||||
}
|
||||
|
||||
return apps[:curr:curr]
|
||||
}
|
||||
|
||||
func filterReleaseByStates(rls *v1alpha1.HelmRelease, state []string) bool {
|
||||
if len(state) == 0 {
|
||||
return true
|
||||
}
|
||||
st := rls.Status.State
|
||||
if st == "" {
|
||||
st = v1alpha1.HelmStatusCreating
|
||||
}
|
||||
if sliceutil.HasString(state, st) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func filterReleasesWithAppVersions(releases []*v1alpha1.HelmRelease, appVersions map[string]*v1alpha1.HelmApplicationVersion) []*v1alpha1.HelmRelease {
|
||||
if len(appVersions) == 0 || len(releases) == 0 {
|
||||
return []*v1alpha1.HelmRelease{}
|
||||
}
|
||||
|
||||
curr := 0
|
||||
for i := 0; i < len(releases); i++ {
|
||||
if _, exists := appVersions[releases[i].Spec.ApplicationVersionId]; exists {
|
||||
if curr != i {
|
||||
releases[curr] = releases[i]
|
||||
}
|
||||
curr++
|
||||
}
|
||||
}
|
||||
|
||||
return releases[:curr:curr]
|
||||
}
|
||||
|
||||
func filterReleases(releases []*v1alpha1.HelmRelease, conditions *params.Conditions) []*v1alpha1.HelmRelease {
|
||||
if conditions == nil || len(conditions.Match) == 0 || len(releases) == 0 {
|
||||
return releases
|
||||
}
|
||||
|
||||
curr := 0
|
||||
for i := 0; i < len(releases); i++ {
|
||||
keyword := strings.ToLower(conditions.Match[Keyword])
|
||||
if keyword != "" {
|
||||
fv := strings.Contains(strings.ToLower(releases[i].GetTrueName()), keyword) ||
|
||||
strings.Contains(strings.ToLower(releases[i].Spec.ChartVersion), keyword) ||
|
||||
strings.Contains(strings.ToLower(releases[i].Spec.ChartAppVersion), keyword)
|
||||
if !fv {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if conditions.Match[Status] != "" {
|
||||
states := strings.Split(conditions.Match[Status], "|")
|
||||
fv := filterReleaseByStates(releases[i], states)
|
||||
if !fv {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if curr != i {
|
||||
releases[curr] = releases[i]
|
||||
}
|
||||
curr++
|
||||
}
|
||||
|
||||
return releases[:curr:curr]
|
||||
}
|
||||
|
||||
func dataKeyInStorage(workspace, id string) string {
|
||||
return path.Join(workspace, id)
|
||||
}
|
||||
|
||||
func convertAppVersionReview(app *v1alpha1.HelmApplication, appVersion *v1alpha1.HelmApplicationVersion) *AppVersionReview {
|
||||
review := &AppVersionReview{}
|
||||
status := appVersion.Status
|
||||
review.Reviewer = status.Audit[0].Operator
|
||||
review.ReviewId = status.Audit[0].Operator
|
||||
review.Status = appVersion.Status.State
|
||||
review.AppId = appVersion.GetHelmApplicationId()
|
||||
review.VersionID = appVersion.GetHelmApplicationVersionId()
|
||||
review.Phase = AppVersionReviewPhaseOAIGen{}
|
||||
review.VersionName = appVersion.GetVersionName()
|
||||
review.Workspace = appVersion.GetWorkspace()
|
||||
|
||||
review.StatusTime = strfmt.DateTime(status.Audit[0].Time.Time)
|
||||
review.AppName = app.GetTrueName()
|
||||
return review
|
||||
}
|
||||
|
||||
func parseChartVersionName(name string) (version, appVersion string) {
|
||||
name = strings.TrimSpace(name)
|
||||
if name == "" {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
parts := strings.Split(name, "[")
|
||||
if len(parts) == 1 {
|
||||
return parts[0], ""
|
||||
}
|
||||
|
||||
version = strings.TrimSpace(parts[0])
|
||||
|
||||
appVersion = strings.Trim(parts[1], "]")
|
||||
appVersion = strings.TrimSpace(appVersion)
|
||||
return
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/openpitrix/application"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/openpitrix/applicationversion"
|
||||
)
|
||||
|
||||
type ApplicationInterface interface {
|
||||
DescribeAppVersion(id string) (*v1alpha1.HelmApplicationVersion, error)
|
||||
DescribeApp(id string) (*v1alpha1.HelmApplication, error)
|
||||
|
||||
ListApps(workspace string, q *query.Query) (*api.ListResult, error)
|
||||
ListAppVersions(workspace, appId string, q *query.Query) (*api.ListResult, error)
|
||||
}
|
||||
|
||||
type applicationOperator struct {
|
||||
appsGetter resources.Interface
|
||||
appVersionGetter resources.Interface
|
||||
}
|
||||
|
||||
func newApplicationOperator(informers externalversions.SharedInformerFactory) ApplicationInterface {
|
||||
op := &applicationOperator{
|
||||
appsGetter: application.New(informers),
|
||||
appVersionGetter: applicationversion.New(informers),
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
func (c *applicationOperator) ListApps(workspace string, q *query.Query) (*api.ListResult, error) {
|
||||
|
||||
labelSelector, err := labels.ConvertSelectorToLabelsMap(q.LabelSelector)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := labels.Set{}
|
||||
if workspace != "" {
|
||||
extra[constants.WorkspaceLabelKey] = workspace
|
||||
}
|
||||
|
||||
if len(extra) > 0 {
|
||||
q.LabelSelector = labels.Merge(labelSelector, extra).String()
|
||||
}
|
||||
|
||||
releases, err := c.appsGetter.List("", q)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list app failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return releases, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DescribeApp(verId string) (*v1alpha1.HelmApplication, error) {
|
||||
ret, err := c.appsGetter.Get("", verId)
|
||||
if err != nil {
|
||||
klog.Errorf("get app failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ret.(*v1alpha1.HelmApplication), nil
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
)
|
||||
|
||||
func (c *applicationOperator) ListAppVersions(workspace, appId string, q *query.Query) (*api.ListResult, error) {
|
||||
|
||||
labelSelector, err := labels.ConvertSelectorToLabelsMap(q.LabelSelector)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := labels.Set{}
|
||||
if workspace != "" {
|
||||
extra[constants.WorkspaceLabelKey] = workspace
|
||||
}
|
||||
|
||||
if appId != "" {
|
||||
extra[constants.ChartApplicationIdLabelKey] = appId
|
||||
}
|
||||
|
||||
if len(extra) > 0 {
|
||||
q.LabelSelector = labels.Merge(labelSelector, extra).String()
|
||||
}
|
||||
|
||||
releases, err := c.appVersionGetter.List("", q)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list app version failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return releases, nil
|
||||
}
|
||||
|
||||
func (c *applicationOperator) DescribeAppVersion(verId string) (*v1alpha1.HelmApplicationVersion, error) {
|
||||
ret, err := c.appVersionGetter.Get("", verId)
|
||||
if err != nil {
|
||||
klog.Errorf("get app version failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ret.(*v1alpha1.HelmApplicationVersion), nil
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/openpitrix/category"
|
||||
)
|
||||
|
||||
type CategoryInterface interface {
|
||||
ListCategories(q *query.Query) (*api.ListResult, error)
|
||||
DescribeCategory(id string) (*v1alpha1.HelmCategory, error)
|
||||
}
|
||||
|
||||
type categoryOperator struct {
|
||||
ctgGetter resources.Interface
|
||||
}
|
||||
|
||||
func newCategoryOperator(ksFactory externalversions.SharedInformerFactory) CategoryInterface {
|
||||
c := &categoryOperator{
|
||||
ctgGetter: category.New(ksFactory),
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *categoryOperator) DescribeCategory(id string) (*v1alpha1.HelmCategory, error) {
|
||||
ret, err := c.ctgGetter.Get("", id)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctg := ret.(*v1alpha1.HelmCategory)
|
||||
return ctg, nil
|
||||
}
|
||||
|
||||
func (c *categoryOperator) ListCategories(q *query.Query) (*api.ListResult, error) {
|
||||
|
||||
result, err := c.ctgGetter.List("", q)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/informers"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
ApplicationInterface
|
||||
RepoInterface
|
||||
HelmReleaseInterface
|
||||
CategoryInterface
|
||||
}
|
||||
|
||||
type openpitrixOperator struct {
|
||||
ApplicationInterface
|
||||
RepoInterface
|
||||
HelmReleaseInterface
|
||||
CategoryInterface
|
||||
}
|
||||
|
||||
func NewOpenPitrixOperator(ksInformers informers.InformerFactory) Interface {
|
||||
return &openpitrixOperator{
|
||||
ApplicationInterface: newApplicationOperator(ksInformers.KubeSphereSharedInformerFactory()),
|
||||
RepoInterface: newRepoOperator(ksInformers.KubeSphereSharedInformerFactory()),
|
||||
HelmReleaseInterface: newReleaseOperator(ksInformers.KubeSphereSharedInformerFactory()),
|
||||
CategoryInterface: newCategoryOperator(ksInformers.KubeSphereSharedInformerFactory()),
|
||||
}
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/openpitrix/helmrelease"
|
||||
)
|
||||
|
||||
type HelmReleaseInterface interface {
|
||||
DescribeApplication(workspace, clusterName, namespace, applicationId string) (*v1alpha1.HelmRelease, error)
|
||||
ListApplications(workspace, cluster, namespace string, q *query.Query) (*api.ListResult, error)
|
||||
}
|
||||
type releaseOperator struct {
|
||||
rlsGetter resources.Interface
|
||||
}
|
||||
|
||||
func newReleaseOperator(ksFactory externalversions.SharedInformerFactory) HelmReleaseInterface {
|
||||
c := &releaseOperator{
|
||||
rlsGetter: helmrelease.New(ksFactory),
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
func (c *releaseOperator) DescribeApplication(workspace, clusterName, namespace, applicationId string) (*v1alpha1.HelmRelease, error) {
|
||||
ret, err := c.rlsGetter.Get("", applicationId)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rls := ret.(*v1alpha1.HelmRelease)
|
||||
return rls, nil
|
||||
}
|
||||
|
||||
func (c *releaseOperator) ListApplications(workspace, cluster, namespace string, q *query.Query) (*api.ListResult, error) {
|
||||
|
||||
labelSelector, err := labels.ConvertSelectorToLabelsMap(q.LabelSelector)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := labels.Set{}
|
||||
if workspace != "" {
|
||||
extra[constants.WorkspaceLabelKey] = workspace
|
||||
}
|
||||
|
||||
// cluster must used with namespace
|
||||
if cluster != "" {
|
||||
extra[constants.ClusterNameLabelKey] = cluster
|
||||
}
|
||||
if namespace != "" {
|
||||
extra[constants.NamespaceLabelKey] = namespace
|
||||
}
|
||||
if len(extra) > 0 {
|
||||
q.LabelSelector = labels.Merge(labelSelector, extra).String()
|
||||
}
|
||||
|
||||
releases, err := c.rlsGetter.List("", q)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Errorf("list app release failed, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return releases, nil
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/api/application/v1alpha1"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/client/informers/externalversions"
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
resources "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3"
|
||||
"kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/openpitrix/repo"
|
||||
)
|
||||
|
||||
type RepoInterface interface {
|
||||
ListRepos(workspace string, q *query.Query) (*api.ListResult, error)
|
||||
DescribeRepo(id string) (*v1alpha1.HelmRepo, error)
|
||||
}
|
||||
|
||||
type repoOperator struct {
|
||||
reposGetter resources.Interface
|
||||
}
|
||||
|
||||
func newRepoOperator(factory externalversions.SharedInformerFactory) RepoInterface {
|
||||
return &repoOperator{
|
||||
reposGetter: repo.New(factory),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *repoOperator) DescribeRepo(id string) (*v1alpha1.HelmRepo, error) {
|
||||
result, err := c.reposGetter.Get("", id)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo := result.(*v1alpha1.HelmRepo)
|
||||
repo.Status.Data = ""
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (c *repoOperator) ListRepos(workspace string, qry *query.Query) (result *api.ListResult, err error) {
|
||||
if workspace != "" {
|
||||
labelSelector, err := labels.ConvertSelectorToLabelsMap(qry.LabelSelector)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
qry.LabelSelector = labels.Merge(labelSelector, labels.Set{constants.WorkspaceLabelKey: workspace}).String()
|
||||
}
|
||||
result, err = c.reposGetter.List("", qry)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// remove status data and credential
|
||||
for i := range result.Items {
|
||||
d := result.Items[i].(*v1alpha1.HelmRepo)
|
||||
d.Status.Data = ""
|
||||
d.Spec.Credential = v1alpha1.HelmRepoCredential{}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,30 +1,25 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package quotas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/klog/v2"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/utils/k8sutil"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -37,57 +32,75 @@ const (
|
||||
persistentvolumeclaimsKey = "persistentvolumeclaims"
|
||||
jobsKey = "count/jobs.batch"
|
||||
cronJobsKey = "count/cronjobs.batch"
|
||||
s2iBuilders = "count/s2ibuilders.devops.kubesphere.io"
|
||||
)
|
||||
|
||||
var supportedResources = map[string]schema.GroupVersionResource{
|
||||
deploymentsKey: {Group: "apps", Version: "v1", Resource: "deployments"},
|
||||
daemonsetsKey: {Group: "apps", Version: "v1", Resource: "daemonsets"},
|
||||
statefulsetsKey: {Group: "apps", Version: "v1", Resource: "statefulsets"},
|
||||
podsKey: {Group: "", Version: "v1", Resource: "pods"},
|
||||
servicesKey: {Group: "", Version: "v1", Resource: "services"},
|
||||
persistentvolumeclaimsKey: {Group: "", Version: "v1", Resource: "persistentvolumeclaims"},
|
||||
ingressKey: {Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"},
|
||||
jobsKey: {Group: "batch", Version: "v1", Resource: "jobs"},
|
||||
cronJobsKey: {Group: "batch", Version: "v1", Resource: "cronjobs"},
|
||||
s2iBuilders: {Group: "devops.kubesphere.io", Version: "v1alpha1", Resource: "s2ibuilders"},
|
||||
}
|
||||
|
||||
type ResourceQuotaGetter interface {
|
||||
GetClusterQuota() (*api.ResourceQuota, error)
|
||||
GetNamespaceQuota(namespace string) (*api.NamespacedResourceQuota, error)
|
||||
}
|
||||
|
||||
type resourceQuotaGetter struct {
|
||||
informers informers.SharedInformerFactory
|
||||
client runtimeclient.Client
|
||||
supportedResources map[string]schema.GroupVersionKind
|
||||
}
|
||||
|
||||
func NewResourceQuotaGetter(informers informers.SharedInformerFactory) ResourceQuotaGetter {
|
||||
return &resourceQuotaGetter{informers: informers}
|
||||
func NewResourceQuotaGetter(client runtimeclient.Client, k8sVersion *semver.Version) ResourceQuotaGetter {
|
||||
supportedResources := map[string]schema.GroupVersionKind{
|
||||
deploymentsKey: {Group: "apps", Version: "v1", Kind: "DeploymentList"},
|
||||
daemonsetsKey: {Group: "apps", Version: "v1", Kind: "DaemonSetList"},
|
||||
statefulsetsKey: {Group: "apps", Version: "v1", Kind: "StatefulSetList"},
|
||||
podsKey: {Group: "", Version: "v1", Kind: "PodList"},
|
||||
servicesKey: {Group: "", Version: "v1", Kind: "ServiceList"},
|
||||
persistentvolumeclaimsKey: {Group: "", Version: "v1", Kind: "PersistentVolumeClaimList"},
|
||||
ingressKey: {Group: "networking.k8s.io", Version: "v1", Kind: "IngressList"},
|
||||
jobsKey: {Group: "batch", Version: "v1", Kind: "JobList"},
|
||||
cronJobsKey: {Group: "batch", Version: "v1", Kind: "CronJobList"},
|
||||
}
|
||||
if k8sutil.ServeBatchV1beta1(k8sVersion) {
|
||||
cronJobs := supportedResources[cronJobsKey]
|
||||
cronJobs.Version = "v1beta1"
|
||||
supportedResources[cronJobsKey] = cronJobs
|
||||
}
|
||||
|
||||
return &resourceQuotaGetter{client: client, supportedResources: supportedResources}
|
||||
}
|
||||
|
||||
func (c *resourceQuotaGetter) getUsage(namespace, resource string) (int, error) {
|
||||
|
||||
genericInformer, err := c.informers.ForResource(supportedResources[resource])
|
||||
if err != nil {
|
||||
// we deliberately ignore error if trying to get non existed resource
|
||||
return 0, nil
|
||||
var obj runtimeclient.ObjectList
|
||||
gvk, ok := c.supportedResources[resource]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("resource %s is not supported", resource)
|
||||
}
|
||||
|
||||
result, err := genericInformer.Lister().ByNamespace(namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
if c.client.Scheme().Recognizes(gvk) {
|
||||
gvkObject, err := c.client.Scheme().New(gvk)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
obj = gvkObject.(runtimeclient.ObjectList)
|
||||
} else {
|
||||
u := &unstructured.UnstructuredList{}
|
||||
u.SetGroupVersionKind(gvk)
|
||||
obj = u
|
||||
}
|
||||
|
||||
if err := c.client.List(context.Background(), obj, runtimeclient.InNamespace(namespace)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(result), nil
|
||||
items, err := meta.ExtractList(obj)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(items), nil
|
||||
}
|
||||
|
||||
// no one use this api anymore, marked as deprecated
|
||||
// GetClusterQuota no one use this api anymore, marked as deprecated
|
||||
func (c *resourceQuotaGetter) GetClusterQuota() (*api.ResourceQuota, error) {
|
||||
|
||||
quota := v1.ResourceQuotaStatus{Hard: make(v1.ResourceList), Used: make(v1.ResourceList)}
|
||||
|
||||
for r := range supportedResources {
|
||||
for r := range c.supportedResources {
|
||||
used, err := c.getUsage("", r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -126,7 +139,7 @@ func (c *resourceQuotaGetter) GetNamespaceQuota(namespace string) (*api.Namespac
|
||||
}
|
||||
|
||||
// add extra quota usage, cause user may not specify them
|
||||
for key := range supportedResources {
|
||||
for key := range c.supportedResources {
|
||||
// only add them when they don't exist in quotastatus
|
||||
if _, ok := quota.Used[v1.ResourceName(key)]; !ok {
|
||||
used, err := c.getUsage(namespace, key)
|
||||
@@ -166,18 +179,17 @@ func updateNamespaceQuota(tmpResourceList, resourceList v1.ResourceList) {
|
||||
}
|
||||
|
||||
func (c *resourceQuotaGetter) getNamespaceResourceQuota(namespace string) (*v1.ResourceQuotaStatus, error) {
|
||||
resourceQuotaLister := c.informers.Core().V1().ResourceQuotas().Lister()
|
||||
quotaList, err := resourceQuotaLister.ResourceQuotas(namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
resourceQuotaList := &v1.ResourceQuotaList{}
|
||||
if err := c.client.List(context.Background(), resourceQuotaList, runtimeclient.InNamespace(namespace)); err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
} else if len(quotaList) == 0 {
|
||||
} else if len(resourceQuotaList.Items) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
quotaStatus := v1.ResourceQuotaStatus{Hard: make(v1.ResourceList), Used: make(v1.ResourceList)}
|
||||
|
||||
for _, quota := range quotaList {
|
||||
for _, quota := range resourceQuotaList.Items {
|
||||
updateNamespaceQuota(quotaStatus.Hard, quota.Status.Hard)
|
||||
updateNamespaceQuota(quotaStatus.Used, quota.Status.Used)
|
||||
}
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package registries
|
||||
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package registries
|
||||
|
||||
@@ -21,7 +10,7 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package registries
|
||||
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package registries
|
||||
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
* Please refer to the LICENSE file in the root directory of the project.
|
||||
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
|
||||
*/
|
||||
|
||||
package registries
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user